├── .drone.yml ├── .gitattributes ├── .github └── dependabot.yml ├── .gitignore ├── .golangci.yml ├── LICENSE ├── Makefile ├── README.md ├── build ├── .drone.yml ├── .gitignore ├── LICENSE ├── Makefile ├── README.md ├── bin │ └── po-diff.sh ├── images │ └── build │ │ ├── Dockerfile │ │ ├── Makefile │ │ ├── build-scripts │ │ └── install-packages.sh │ │ └── root │ │ ├── build │ │ ├── rsyncd.sh │ │ └── run.sh │ │ └── usr │ │ └── local │ │ └── bin │ │ ├── docker-entrypoint.sh │ │ ├── gh │ │ ├── publish-helm-chart.sh │ │ ├── setup-credentials-helper.sh │ │ ├── setup-github-credentials.sh │ │ └── xvfb-chrome └── makelib │ ├── cache.mk │ ├── common.mk │ ├── gcp.mk │ ├── gettext.mk │ ├── git-publish.mk │ ├── golang.mk │ ├── helm.mk │ ├── image.mk │ ├── k8s-tools.mk │ ├── kubebuilder.mk │ ├── nodejs.mk │ ├── php.mk │ ├── protobuf.mk │ ├── react.mk │ ├── utils.mk │ └── wordpress.mk ├── doc.go ├── go.mod ├── go.sum ├── hack └── boilerplate.go.txt └── pkg ├── beat └── beat.go ├── log ├── adapters │ └── lager │ │ └── lager.go ├── log.go └── log_test.go ├── mergo └── transformers │ ├── transformers.go │ ├── transformers_suite_test.go │ └── transformers_test.go ├── meta ├── finalizer_test.go ├── finalizers.go └── meta_suite_test.go ├── net └── net.go ├── predicate ├── class.go ├── class_test.go └── suite_test.go ├── rand └── rand.go ├── rate-limiter └── rate_limiter.go └── syncer ├── example_test.go ├── external.go ├── interface.go ├── object.go ├── object_test.go ├── remove_resource.go ├── remove_resource_test.go ├── syncer.go ├── syncer_suite_test.go └── syncer_test.go /.drone.yml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: pipeline 3 | name: default 4 | 5 | clone: 6 | disable: true 7 | 8 | workspace: 9 | base: /workspace 10 | path: src/github.com/presslabs/controller-util 11 | 12 | steps: 13 | - name: git 14 | pull: default 15 | image: plugins/git 16 | settings: 17 | depth: 0 18 | tags: true 19 | 20 | - name: dependencies 21 | image: presslabs/build:stable 22 | pull: always 23 | commands: 24 | - make build.tools 25 | 26 | - name: lint 27 | image: presslabs/build:stable 28 | commands: 29 | - make lint 30 | 31 | - name: test 32 | image: presslabs/build:stable 33 | commands: 34 | - make test 35 | 36 | trigger: 37 | ref: 38 | exclude: 39 | - refs/heads/dependabot/** 40 | 41 | --- 42 | kind: signature 43 | hmac: f55df7a519243e9a5e52b55fb2ec90ea65d2f21c3a79f46c617c56c4255d02c8 44 | 45 | ... 46 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | build/** linguist-vendored 2 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 
3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "gomod" # See documentation for possible values 9 | directory: "/" # Location of package manifests 10 | schedule: 11 | interval: "monthly" 12 | open-pull-requests-limit: 100 13 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, build with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | 14 | bin 15 | 16 | .idea 17 | 18 | .cache 19 | _output 20 | .work 21 | 22 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | linters-settings: 2 | dupl: 3 | threshold: 400 4 | gocyclo: 5 | min-complexity: 10 6 | cyclop: 7 | max-complexity: 10 8 | govet: 9 | shadow: true 10 | lll: 11 | line-length: 170 12 | tab-width: 4 13 | 14 | linters: 15 | presets: 16 | - bugs 17 | - unused 18 | - format 19 | - style 20 | - complexity 21 | - performance 22 | 23 | # we should re-enable them and make lint pass 24 | disable: 25 | - goimports 26 | - gochecknoglobals 27 | - exhaustruct 28 | - wrapcheck 29 | - gomoddirectives 30 | - godox 31 | - varnamelen 32 | - ireturn 33 | - paralleltest 34 | - testpackage 35 | - gci 36 | 37 | issues: 38 | max-same-issues: 0 39 | exclude-use-default: false 40 | exclude: 41 | # gosec G104, about unhandled errors. We do that with errcheck already 42 | - "G104: Errors unhandled" 43 | exclude-rules: 44 | - linters: 45 | # Ignore package comments (ST1000) since most of the time are irrelevant 46 | - stylecheck 47 | text: "ST1000" 48 | 49 | - linters: 50 | - revive 51 | text: "package-comments: should have a package comment" 52 | 53 | - linters: 54 | - typecheck 55 | path: "../../../../../usr/local/go/src/slices/sort.go" 56 | 57 | - linters: 58 | - depguard 59 | text: "not allowed from list 'Main'" 60 | 61 | - linters: 62 | - revive 63 | text: "should not use dot imports" 64 | path: '(.*)_test\.go' 65 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 
22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Project Setup 2 | PROJECT_NAME := controller-util 3 | PROJECT_REPO := github.com/presslabs/$(PROJECT_NAME) 4 | 5 | PLATFORMS = linux_amd64 darwin_amd64 6 | 7 | GO_SUBDIRS := pkg 8 | 9 | GO111MODULE=on 10 | 11 | GO_STATIC_PACKAGES = $(GO_PROJECT)/cmd/wp-operator 12 | GO_LDFLAGS += -X $(PROJECT_REPO)/pkg/version.buildDate=$(BUILD_DATE) \ 13 | -X $(PROJECT_REPO)/pkg/version.gitVersion=$(VERSION) \ 14 | -X $(PROJECT_REPO)/pkg/version.gitCommit=$(GIT_COMMIT) \ 15 | -X $(PROJECT_REPO)/pkg/version.gitTreeState=$(GIT_TREE_STATE) 16 | 17 | include build/makelib/common.mk 18 | include build/makelib/golang.mk 19 | include build/makelib/kubebuilder.mk 20 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![Presslabs Logo](https://www.presslabs.com/logo.png) 2 | 3 | # controller-util 4 | 5 | [![Build Status](https://ci.presslabs.net/api/badges/presslabs/controller-util/status.svg)](https://ci.presslabs.net/presslabs/controller-util) 6 | [![GoDoc](https://godoc.org/github.com/presslabs/controller-util?status.svg)](https://godoc.org/github.com/presslabs/controller-util) 7 | 8 | Utility code for writing kubernetes controllers using kubebuilder and controller-runtime 9 | -------------------------------------------------------------------------------- /build/.drone.yml: -------------------------------------------------------------------------------- 1 | kind: pipeline 2 | name: default 3 | 4 | clone: 5 | disable: true 6 | 7 | workspace: 8 | base: /workspace 9 | path: src/github.com/presslabs/build 10 | 11 | steps: 12 | - name: clone 13 | image: plugins/git 14 | settings: 15 | depth: 0 16 | tags: true 17 | 18 | - name: install dependencies 19 | image: presslabs/build:stable 20 | commands: 21 | - make -j4 build.tools 22 | 23 | - name: build 24 | pull: true 25 | image: presslabs/build:stable 26 | environment: 27 | DOCKER_HOST: "unix:///workspace/docker.sock" 28 | commands: 29 | - make -j4 build 30 | 31 | - name: publish 32 | image: presslabs/build:stable 33 | commands: 34 | - /usr/local/bin/setup-credentials-helper.sh 35 | - make publish 36 | environment: 37 | DOCKER_HOST: "unix:///workspace/docker.sock" 38 | DOCKER_USERNAME: presslabsbot 39 | DOCKER_PASSWORD: 40 | from_secret: DOCKERHUB_TOKEN 41 | when: 42 | ref: 43 | - refs/heads/master 44 | - refs/heads/release-* 45 | 46 | services: 47 | - name: docker 48 | image: docker:dind 49 | privileged: true 50 | commands: 51 | - /usr/local/bin/dockerd-entrypoint.sh dockerd --host "unix:///workspace/docker.sock" --storage-driver overlay2 --log-level error 52 | 53 | 54 | trigger: 55 | ref: 56 | - refs/pull/** 57 | - refs/heads/** 58 | event: 59 | exclude: 60 | - promote 61 | - tag 62 | 63 | --- 64 | kind: pipeline 65 | name: promote 66 | 67 | clone: 68 | disable: true 69 | 70 | workspace: 71 | base: /workspace 72 | path: src/github.com/presslabs/build 73 | 74 | steps: 75 | - name: clone 76 | image: plugins/git 77 | settings: 78 | depth: 0 79 | tags: true 80 | 81 | - name: promote 82 | image: presslabs/build:stable 83 | commands: 84 | - /usr/local/bin/setup-credentials-helper.sh 85 | - make promote CHANNEL=${DRONE_DEPLOY_TO} PROMOTE_IMAGE_TAG=${PUBLISH_TAG} 86 | - '[ "$PUBLISH_TAG" = "" ] || make tag VERSION=${PUBLISH_TAG}' 87 | environment: 88 | DOCKER_HOST: 
"unix:///workspace/docker.sock" 89 | DOCKER_USERNAME: presslabsbot 90 | DOCKER_PASSWORD: 91 | from_secret: DOCKERHUB_TOKEN 92 | 93 | trigger: 94 | event: 95 | - promote 96 | -------------------------------------------------------------------------------- /build/.gitignore: -------------------------------------------------------------------------------- 1 | _output 2 | .cache 3 | .work 4 | /bin/ 5 | .idea 6 | 7 | .#* 8 | -------------------------------------------------------------------------------- /build/LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /build/Makefile: -------------------------------------------------------------------------------- 1 | # Project Setup 2 | PROJECT_NAME := presslabs-build 3 | PROJECT_REPO := github.com/presslabs/build 4 | 5 | PLATFORMS = linux_amd64 6 | 7 | # this is required, since by default, the makelib files are under a ./build path prefix, but here, 8 | # they are under root 9 | ROOT_DIR := $(abspath $(shell cd ./ && pwd -P)) 10 | 11 | include makelib/common.mk 12 | 13 | IMAGES ?= build 14 | DOCKER_REGISTRY ?= presslabs 15 | 16 | include makelib/image.mk 17 | -------------------------------------------------------------------------------- /build/README.md: -------------------------------------------------------------------------------- 1 | # build 2 | Presslabs GNU Make-based build system 3 | 4 | ## Goals 5 | 6 | 1. Allow building locally the same way the project is built on CI 7 | 2. Provide a sane test, build, publish flow 8 | 3. Provide a stable toolchain for building (e.g. pinned tool versions) 9 | 4. Enable caching to speed up builds 10 | 11 | ## Quickstart 12 | 13 | To include `build/` in your project, run: 14 | 15 | ```sh 16 | git subtree add --squash -P build https://github.com/presslabs/build.git master 17 | 18 | cat <<EOF > Makefile 19 | # Project Setup 20 | PROJECT_NAME := mysql-operator 21 | PROJECT_REPO := github.com/presslabs/mysql-operator 22 | 23 | include build/makelib/common.mk 24 | ``` 25 | 26 | ### Pull new changes 27 | 28 | ```sh 29 | git subtree pull --squash -P build https://github.com/presslabs/build.git master 30 | ``` 31 | 32 | ### Push back changes 33 | 34 | A [workaround](https://github.com/rust-lang/rust-clippy/issues/5565#issuecomment-623489754) for bypassing 35 | the segfault of the git subtree command on repos with many commits. 36 | 37 | ```sh 38 | ulimit -s 60000 # workaround to fix segfault 39 | 40 | git subtree push -P build/ git@github.com:presslabs/build.git 41 | ``` 42 | 43 | ## Development workflow 44 | 45 | Image publishing works as follows: 46 | 47 | On a feature branch (e.g. `feat-*`): 48 | * Drone build runs without image publishing 49 | * Can't trigger a promotion (it will fail) 50 | 51 | On a release branch (i.e. `release-*` or `master`): 52 | * Drone build will publish images using git-semver, with the following tags: `$(git-semver)`, `$(git-semver)-$ARCH` 53 | * Can manually promote to the following channels (`$CHANNEL`): `stable`, `beta`, `alpha`, `master` 54 | * *On promote*: the images are published with the following tags: `$CHANNEL`, `$CHANNEL-$(git-semver)`, `$(git-semver)`, `$(git-semver)-$ARCH` 55 | * *On promote* and parameter `PUBLISH_TAG` is set: a new git tag will be created and images will be published under the following tags: `$CHANNEL`, `$CHANNEL-$PUBLISH_TAG`, `$PUBLISH_TAG` (if channel is `stable`). 56 | 57 | On git tag events, the CI will not run. 58 | 59 | ## Usage 60 | 61 | ``` 62 | Usage: make [make-options] <target> [options] 63 | 64 | Common Targets: 65 | build Build source code and other artifacts for host platform. 66 | build.all Build source code and other artifacts for all platforms. 67 | build.tools Install the required build tools. 68 | clean Remove all files created during the build. 69 | distclean Remove all files created during the build including cached tools. 70 | generate Run code generation tools. 71 | fmt Run code auto-formatting tools. 72 | lint Run lint and code analysis tools. 73 | test Runs unit tests.
74 | e2e Runs end-to-end integration tests. 75 | translate Collect translation strings and post-process the .pot/.po files. 76 | help Show this help info. 77 | ``` 78 | 79 | ## Acknowledgement 80 | 81 | This work is based on https://github.com/bitpoke/build 82 | This work is based on https://github.com/upbound/build. 83 | -------------------------------------------------------------------------------- /build/bin/po-diff.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Copyright 2019 Pressinfra SRL 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | set -o errexit 18 | set -o nounset 19 | set -o pipefail 20 | 21 | export ROOT_DIR=$(dirname "${BASH_SOURCE}")/../.. 22 | 23 | # Install tools we need, but only from vendor/... 24 | cd "${ROOT_DIR}" 25 | 26 | diff -u \ 27 | <(grep -E '^msgid' "${1}" | sort | sed 's/msgid[[:space:]]*//g') \ 28 | <(grep -E '^msgid' "${2}" | sort | sed 's/msgid[[:space:]]*//g') 29 | 30 | exit 0 31 | -------------------------------------------------------------------------------- /build/images/build/Dockerfile: -------------------------------------------------------------------------------- 1 | # Copyright 2016 The Upbound Authors. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | FROM google/cloud-sdk:429.0.0 16 | 17 | ARG ARCH 18 | 19 | ENV DEBIAN_FRONTEND noninteractive 20 | ENV PYTHONUNBUFFERED 1 21 | 22 | RUN set -ex \ 23 | && curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - 24 | 25 | COPY build-scripts /usr/local/build-scripts 26 | 27 | COPY --from=docker/buildx-bin:0.6.1 /buildx /usr/libexec/docker/cli-plugins/docker-buildx 28 | RUN docker buildx version 29 | 30 | # ------------------------------------------------------------------------------------------------ 31 | # python 32 | RUN set -ex \ 33 | && apt update \ 34 | && apt install -y git openssl python3 python3-pip make curl libstdc++6 ca-certificates wget coreutils \ 35 | && pip3 install zipa 36 | 37 | # ------------------------------------------------------------------------------------------------ 38 | # kubectl 39 | ENV KUBECTL_VERSION 1.26.4 40 | RUN wget -q https://storage.googleapis.com/kubernetes-release/release/v$KUBECTL_VERSION/bin/linux/amd64/kubectl -O/usr/local/bin/kubectl \ 41 | && chmod 0755 /usr/local/bin/kubectl \ 42 | && chown root:root /usr/local/bin/kubectl 43 | 44 | # ------------------------------------------------------------------------------------------------ 45 | # install build and release tools 46 | RUN /usr/local/build-scripts/install-packages.sh \ 47 | apt-transport-https \ 48 | gettext \ 49 | jq \ 50 | lsb-release \ 51 | make \ 52 | rsync \ 53 | runit \ 54 | sudo \ 55 | wget \ 56 | zip 57 | # ------------------------------------------------------------------------------------------------ 58 | # PHP 59 | RUN sh -c 'echo "deb https://packages.sury.org/php/ $(lsb_release -sc) main" > /etc/apt/sources.list.d/php.list' \ 60 | && wget -O /etc/apt/trusted.gpg.d/php.gpg https://packages.sury.org/php/apt.gpg \ 61 | && /usr/local/build-scripts/install-packages.sh \ 62 | php7.4-bcmath \ 63 | php7.4-curl \ 64 | php7.4-cli \ 65 | php7.4-fpm \ 66 | php7.4-gd \ 67 | php7.4-mbstring \ 68 | php7.4-mysql \ 69 | php7.4-opcache \ 70 | php7.4-tidy \ 71 | php7.4-xml \ 72 | php7.4-xmlrpc \ 73 | php7.4-xsl \ 74 | php7.4-zip \ 75 | php-apcu \ 76 | php-apcu-bc \ 77 | php-geoip \ 78 | php-imagick \ 79 | php-memcached \ 80 | php-redis \ 81 | php-sodium \ 82 | php-yaml \ 83 | && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 84 | 85 | # ------------------------------------------------------------------------------------------------ 86 | # git config 87 | RUN git config --global user.email "bot@presslabs.com" \ 88 | && git config --global user.name "RoBot" \ 89 | && git config --global diff.tar-filelist.binary true \ 90 | && git config --global diff.tar-filelist.textconv 'tar -tvf' \ 91 | && git config --global diff.tar.binary true \ 92 | && git config --global diff.tar.textconv 'tar -xvOf' 93 | 94 | # ------------------------------------------------------------------------------------------------ 95 | # Go support 96 | RUN GO_VERSION=1.24.0 && \ 97 | GO_HASH=dea9ca38a0b852a74e81c26134671af7c0fbe65d81b0dc1c5bfe22cf7d4c8858 && \ 98 | curl -fsSL https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz -o golang.tar.gz && \ 99 | echo "${GO_HASH} golang.tar.gz" | sha256sum -c - && \ 100 | tar -C /usr/local -xzf golang.tar.gz && \ 101 | rm golang.tar.gz 102 | ENV GOPATH /workspace 103 | ENV PATH /workspace/bin:/usr/local/go/bin:$PATH 104 | 105 | # precompile the go standard library for all supported platforms and configurations 106 | # the install suffixes match those in golang.mk so please keep them in sync 107 | RUN platforms="darwin_amd64 windows_amd64 linux_amd64 
linux_arm64" && \ 108 | for p in $platforms; do CGO_ENABLED=0 GOOS=${p%_*} GOARCH=${p##*_} GOARM=7 go install -installsuffix static -a std; done 109 | 110 | # ------------------------------------------------------------------------------------------------ 111 | # Node JS and chrome support 112 | RUN curl -fsSL https://deb.nodesource.com/setup_14.x | bash - && \ 113 | curl -fsSL https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - && \ 114 | echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" > /etc/apt/sources.list.d/google.list && \ 115 | /usr/local/build-scripts/install-packages.sh \ 116 | nodejs \ 117 | google-chrome-stable \ 118 | xvfb && \ 119 | rm -f /etc/apt/sources.list.d/google.list && \ 120 | ln -fs /usr/local/bin/xvfb-chrome /usr/bin/google-chrome && \ 121 | rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 122 | ENV CHROME_BIN /usr/bin/google-chrome 123 | 124 | # ------------------------------------------------------------------------------------------------ 125 | # Yarn 126 | RUN curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add - && \ 127 | echo "deb https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list && \ 128 | /usr/local/build-scripts/install-packages.sh \ 129 | yarn && \ 130 | rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 131 | 132 | # ------------------------------------------------------------------------------------------------ 133 | # rclone 134 | RUN set -ex \ 135 | && export RCLONE_VERSION=1.62.2 \ 136 | && curl -sL -o rclone-v${RCLONE_VERSION}-linux-amd64.deb https://github.com/rclone/rclone/releases/download/v${RCLONE_VERSION}/rclone-v${RCLONE_VERSION}-linux-amd64.deb \ 137 | && dpkg -i rclone-v${RCLONE_VERSION}-linux-amd64.deb \ 138 | && rm rclone-v${RCLONE_VERSION}-linux-amd64.deb \ 139 | && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 140 | 141 | # ------------------------------------------------------------------------------------------------ 142 | # dockerize 143 | RUN set -ex \ 144 | && export DOCKERIZE_VERSION="2.1.0" \ 145 | && curl -sL -o dockerize.tar.gz "https://github.com/presslabs/dockerize/releases/download/v${DOCKERIZE_VERSION}/dockerize-linux-amd64-v${DOCKERIZE_VERSION}.tar.gz" \ 146 | && tar -C /usr/local/bin -xzvf dockerize.tar.gz \ 147 | && rm dockerize.tar.gz \ 148 | && chmod 0755 /usr/local/bin/dockerize \ 149 | && chown root:root /usr/local/bin/dockerize 150 | 151 | # ------------------------------------------------------------------------------------------------ 152 | # sops 153 | RUN set -ex \ 154 | && export SOPS_VERSION="3.7.1" \ 155 | && curl -sL -o /usr/local/bin/sops "https://github.com/mozilla/sops/releases/download/v${SOPS_VERSION}/sops-v${SOPS_VERSION}.linux" \ 156 | && chmod 0755 /usr/local/bin/sops \ 157 | && chown root:root /usr/local/bin/sops 158 | 159 | # ------------------------------------------------------------------------------------------------ 160 | # helm 161 | RUN set -ex \ 162 | && export HELM_VERSION="3.11.3" \ 163 | && curl -sL -o helm.tar.gz "https://get.helm.sh/helm-v${HELM_VERSION}-linux-amd64.tar.gz" \ 164 | && tar -C /usr/local/bin -xzvf helm.tar.gz --strip-components 1 linux-amd64/helm \ 165 | && rm helm.tar.gz \ 166 | && chmod 0755 /usr/local/bin/helm \ 167 | && chown root:root /usr/local/bin/helm 168 | 169 | # ------------------------------------------------------------------------------------------------ 170 | # helm secrets plugin 171 | RUN set -ex \ 172 | && export HELM_SECRETS_VERSION="3.8.3" \ 173 | && helm 
plugin install https://github.com/jkroepke/helm-secrets --version ${HELM_SECRETS_VERSION} 174 | 175 | # ------------------------------------------------------------------------------------------------ 176 | # kustomize 177 | RUN set -ex \ 178 | && export KUSTOMIZE_VERSION="4.5.4" \ 179 | && curl -sL -o kustomize.tar.gz "https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/v${KUSTOMIZE_VERSION}/kustomize_v${KUSTOMIZE_VERSION}_linux_amd64.tar.gz" \ 180 | && tar -C /usr/local/bin -xzvf kustomize.tar.gz \ 181 | && rm kustomize.tar.gz \ 182 | && chmod 0755 /usr/local/bin/kustomize \ 183 | && chown root:root /usr/local/bin/kustomize 184 | 185 | # ------------------------------------------------------------------------------------------------ 186 | # docker-compose 187 | RUN set -ex \ 188 | && export DOCKER_COMPOSE_VERSION="1.29.2" \ 189 | && curl -sL -o /usr/local/bin/docker-compose "https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-Linux-x86_64" \ 190 | && chmod +x /usr/local/bin/docker-compose \ 191 | && chown root:root /usr/local/bin/docker-compose 192 | 193 | # ------------------------------------------------------------------------------------------------ 194 | # run tini as PID 1 and avoid signal handling issues 195 | RUN set -ex \ 196 | && export TINI_VERSION=v0.19.0 \ 197 | && curl -sL -o /tini https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-${ARCH} \ 198 | && chmod +x /tini 199 | 200 | # ------------------------------------------------------------------------------------------------ 201 | # yq 202 | RUN set -ex \ 203 | && export YQ_VERSION=4.24.5 \ 204 | && curl -sL -o /usr/local/bin/yq "https://github.com/mikefarah/yq/releases/download/v${YQ_VERSION}/yq_linux_amd64" \ 205 | && chmod 0755 /usr/local/bin/yq \ 206 | && chown root:root /usr/local/bin/yq 207 | 208 | COPY root / 209 | 210 | ENV USE_GKE_GCLOUD_AUTH_PLUGIN True 211 | ENV GOOGLE_APPLICATION_CREDENTIALS /run/google-credentials.json 212 | 213 | ENTRYPOINT [ "/tini", "-g", "--", "/build/run.sh" ] 214 | -------------------------------------------------------------------------------- /build/images/build/Makefile: -------------------------------------------------------------------------------- 1 | PLATFORMS := linux_amd64 2 | include ../../makelib/common.mk 3 | 4 | # this is required, since by default, the makelib files are under a ./build path prefix, but here, 5 | # they are under root 6 | ROOT_DIR := $(abspath $(shell cd ./../.. && pwd -P)) 7 | 8 | IMAGE = $(BUILD_REGISTRY)/build-$(ARCH) 9 | CACHE_IMAGES = $(IMAGE) 10 | include ../../makelib/image.mk 11 | 12 | img.build: 13 | @$(INFO) docker build $(IMAGE) $(IMAGE_PLATFORM) 14 | @cp -La . $(IMAGE_TEMP_DIR) 15 | @mkdir -p $(IMAGE_TEMP_DIR)/rootfs 16 | @docker buildx build $(BUILD_ARGS) \ 17 | --platform $(IMAGE_PLATFORM) \ 18 | -t $(IMAGE) \ 19 | --build-arg ARCH=$(ARCH) \ 20 | $(IMAGE_TEMP_DIR) 21 | @$(OK) docker build $(IMAGE) 22 | -------------------------------------------------------------------------------- /build/images/build/build-scripts/install-packages.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Copyright 2019 Pressinfra 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | set -o errexit 18 | set -o nounset 19 | set -o pipefail 20 | 21 | apt-get update 22 | apt-get install -yy -q --no-install-recommends "${@}" 23 | apt-get clean 24 | rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 25 | -------------------------------------------------------------------------------- /build/images/build/root/build/rsyncd.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | # Copyright 2016 The Upbound Authors. All rights reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | VOLUME=${VOLUME:-/volume} 18 | ALLOW=${ALLOW:-192.168.0.0/16 172.16.0.0/12 10.0.0.0/8} 19 | OWNER=${OWNER:-nobody} 20 | GROUP=${GROUP:-nogroup} 21 | 22 | if [[ "${GROUP}" != "nogroup" && "${GROUP}" != "root" ]]; then 23 | groupadd -g ${GROUP} rsync 24 | fi 25 | 26 | if [[ "${OWNER}" != "nobody" && "${OWNER}" != "root" ]]; then 27 | useradd -u ${OWNER} -G rsync rsync 28 | fi 29 | 30 | chown "${OWNER}:${GROUP}" "${VOLUME}" 31 | 32 | [ -f /etc/rsyncd.conf ] || cat <<EOF > /etc/rsyncd.conf 33 | uid = ${OWNER} 34 | gid = ${GROUP} 35 | use chroot = yes 36 | log file = /dev/stdout 37 | reverse lookup = no 38 | [volume] 39 | hosts deny = * 40 | hosts allow = ${ALLOW} 41 | read only = false 42 | path = ${VOLUME} 43 | comment = volume 44 | EOF 45 | 46 | for dir in ${MKDIRS}; do 47 | mkdir -p ${dir} 48 | chown "${OWNER}:${GROUP}" ${dir} 49 | done 50 | 51 | exec /usr/bin/rsync --no-detach --daemon --config /etc/rsyncd.conf "$@" 52 | -------------------------------------------------------------------------------- /build/images/build/root/build/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | # Copyright 2016 The Upbound Authors. All rights reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License.
16 | 17 | ARGS="$@" 18 | if [ $# -eq 0 ]; then 19 | ARGS=/bin/bash 20 | fi 21 | 22 | BUILDER_USER=${BUILDER_USER:-upbound} 23 | BUILDER_GROUP=${BUILDER_GROUP:-upbound} 24 | BUILDER_UID=${BUILDER_UID:-1000} 25 | BUILDER_GID=${BUILDER_GID:-1000} 26 | 27 | groupadd -o -g $BUILDER_GID $BUILDER_GROUP 2> /dev/null 28 | useradd -o -m -g $BUILDER_GID -u $BUILDER_UID $BUILDER_USER 2> /dev/null 29 | echo "$BUILDER_USER ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers 30 | export HOME=/home/${BUILDER_USER} 31 | echo "127.0.0.1 $(cat /etc/hostname)" >> /etc/hosts 32 | [[ -S /var/run/docker.sock ]] && chmod 666 /var/run/docker.sock 33 | chown -R $BUILDER_UID:$BUILDER_GID $HOME 34 | exec chpst -u :$BUILDER_UID:$BUILDER_GID ${ARGS} 35 | -------------------------------------------------------------------------------- /build/images/build/root/usr/local/bin/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | source /usr/local/bin/setup-credentials-helper.sh 4 | 5 | if [ -z "$1" ] ; then 6 | exec "/bin/sh" 7 | else 8 | exec "$@" 9 | fi 10 | -------------------------------------------------------------------------------- /build/images/build/root/usr/local/bin/gh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import argparse 3 | import base64 4 | import io 5 | import logging 6 | import os 7 | import sys 8 | 9 | from requests import auth, exceptions 10 | from zipa import api_github_com as gh 11 | 12 | def repo_path(path): 13 | parts = list(filter(len, path.split('/', 2))) 14 | 15 | if len(parts) != 3: 16 | raise argparse.ArgumentTypeError('path must be of the form :owner/:repo/path') 17 | 18 | return argparse.Namespace(owner=parts[0], repo=parts[1], path=parts[2]) 19 | 20 | def bin_open(path): 21 | return open(path, 'rb') 22 | 23 | def parser(): 24 | parser = argparse.ArgumentParser(prog='gh', description='GitHub CLI utilities') 25 | 26 | parser.add_argument('--log', '-l', default='INFO', 27 | help='Log verbosity level. (default: INFO)') 28 | 29 | parser.add_argument('--gh-user', '-u', 30 | default=os.getenv('GH_USER', ''), 31 | help='GitHub username. (defaults to environment variable GH_USER)') 32 | parser.add_argument('--gh-password', '-p', 33 | default=os.getenv('GH_PASSWORD', ''), 34 | help='GitHub password. (defaults to environment variable GH_PASSWORD)') 35 | 36 | subparsers = parser.add_subparsers(title='subcommands', 37 | description='Available subcommands', 38 | dest='command') 39 | 40 | parser_put = subparsers.add_parser('put', help='Creates or updates a file in a GitHub repo') 41 | parser_get = subparsers.add_parser('get', help='Gets the file content of a GitHub repo') 42 | parser_delete = subparsers.add_parser('delete', help='Deletes a file from a GitHub repo') 43 | for subparser in [parser_put]: 44 | subparser.add_argument('src', type=bin_open, 45 | help='Source file path') 46 | subparser.add_argument('--overwrite', '-w', action='store_true', 47 | help='Allow overwriting existing files (default: false)') 48 | subparser.add_argument('--skip-existing', action='store_true', 49 | help='If destination exists continue without error (default: false)') 50 | 51 | for subparser in [parser_put, parser_get, parser_delete]: 52 | subparser.add_argument('resource', type=repo_path, 53 | metavar='dest', 54 | help='GitHub filepath to operate upon ' 55 | '(e.g.
octocat/hello-world/README.md)') 56 | subparser.add_argument('--branch', default='master', 57 | help='Repository branch to operate on (default: master)') 58 | subparser.add_argument('--successfull-codes', '-c', default=['200'], 59 | metavar='code', action='append', 60 | help='Consider these HTTP codes successful (default: 200)') 61 | 62 | for subparser in [parser_put, parser_delete]: 63 | subparser.add_argument('--message', '-m', 64 | help='Commit message') 65 | 66 | 67 | return parser 68 | 69 | def _get_file_content(opts): 70 | repo = gh.repos[opts.resource.owner][opts.resource.repo] 71 | content = repo.contents[_get_dest_path(opts)](ref=opts.branch) 72 | 73 | if type(content) is list: 74 | raise TypeError("Getting a directory is not supported") 75 | elif content.get('type') == 'submodule': 76 | raise TypeError("Getting a submodule is not supported") 77 | elif content.get('type') == 'symlink': 78 | raise TypeError("Getting a symlink is not supported") 79 | 80 | return content 81 | 82 | def _get_dest_path(opts): 83 | if opts.resource.path.endswith('/') and opts.src: 84 | path = os.path.join(opts.resource.path, os.path.basename(opts.src.name)) 85 | else: 86 | path = opts.resource.path 87 | 88 | return path 89 | 90 | def gh_file_get(opts): 91 | repo = gh.repos[opts.resource.owner][opts.resource.repo] 92 | try: 93 | content = _get_file_content(opts) 94 | except exceptions.HTTPError as e: 95 | if int(e.response.status_code) not in map(int, opts.successfull_codes): 96 | raise 97 | 98 | base64.decode(io.StringIO(content.content), sys.stdout.buffer) 99 | 100 | def gh_file_delete(opts): 101 | message = opts.message or f'Delete {opts.resource.path}' 102 | repo = gh.repos[opts.resource.owner][opts.resource.repo] 103 | old_content = _get_file_content(opts) 104 | content = repo.contents[opts.resource.path].delete(sha=old_content.sha, 105 | branch=opts.branch, 106 | message=message) 107 | logging.info('%s successfully deleted', _get_dest_path(opts)) 108 | 109 | def gh_file_put(opts): 110 | repo = gh.repos[opts.resource.owner][opts.resource.repo] 111 | path = _get_dest_path(opts) 112 | 113 | try: 114 | old_content = _get_file_content(opts) 115 | exists = True 116 | except exceptions.HTTPError as e: 117 | if int(e.response.status_code) == 404: 118 | exists = False 119 | else: 120 | raise 121 | 122 | kwargs = { 123 | 'branch': opts.branch, 124 | 'content': base64.b64encode(opts.src.read()).decode() 125 | } 126 | if exists: 127 | if opts.skip_existing: 128 | logging.info('%s already exists.
Skipping.', path) 129 | return 130 | if not opts.overwrite: 131 | raise RuntimeError(f'{opts.resource.owner}/{opts.resource.repo}{path} already exists') 132 | kwargs['message'] = opts.message or f'Update {path}' 133 | kwargs['sha'] = old_content.sha 134 | else: 135 | kwargs['message'] = opts.message or f'Create {path}' 136 | 137 | try: 138 | repo.contents[path].put(**kwargs) 139 | except exceptions.HTTPError as e: 140 | if int(e.response.status_code) not in map(int, opts.successfull_codes): 141 | raise 142 | 143 | logging.info('%s successfully %s', path, 'updated' if exists else 'created') 144 | 145 | def main(): 146 | args = parser().parse_args() 147 | if args.command is None: 148 | parser().print_help(sys.stderr) 149 | sys.exit(1) 150 | 151 | if args.gh_user and args.gh_password: 152 | gh.config.auth = auth.HTTPBasicAuth(args.gh_user, args.gh_password) 153 | 154 | logging.basicConfig(stream=sys.stderr, level=args.log.upper()) 155 | 156 | if args.command == 'get': 157 | gh_file_get(args) 158 | elif args.command == 'put': 159 | gh_file_put(args) 160 | elif args.command == 'delete': 161 | gh_file_delete(args) 162 | 163 | 164 | if __name__ == '__main__': 165 | main() 166 | -------------------------------------------------------------------------------- /build/images/build/root/usr/local/bin/publish-helm-chart.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -eo pipefail 3 | 4 | realpath() { 5 | [[ $1 = /* ]] && echo "$1" || echo "$PWD/${1#./}" 6 | } 7 | 8 | chart_dir="$1" 9 | gs_bucket="$2" 10 | 11 | if [ -z "$chart_dir" ] || [ -z "$gs_bucket" ] ; then 12 | echo "Usage: publish-helm-chart.sh CHART_PATH GS_BUCKET" >&2 13 | exit 2 14 | fi 15 | 16 | tmp="$(mktemp -d)" 17 | chart_full_dir="$(realpath "$chart_dir")" 18 | 19 | (cd "$chart_dir" && helm dep update) 20 | cd "$tmp" 21 | helm package "$chart_full_dir" 22 | gsutil -q cp gs://$gs_bucket/index.yaml ./index.yaml 23 | helm repo index --url https://$gs_bucket.storage.googleapis.com/ --merge ./index.yaml ./ 24 | gsutil -q rsync ./ gs://kluster-charts 25 | -------------------------------------------------------------------------------- /build/images/build/root/usr/local/bin/setup-credentials-helper.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | : "${GOOGLE_CREDENTIALS:="$(cat "$PLUGIN_GOOGLE_CREDENTIALS_FILE" 2>/dev/null)"}" 3 | : "${GOOGLE_CLOUD_PROJECT:="$PLUGIN_GOOGLE_CLOUD_PROJECT"}" 4 | : "${GOOGLE_CLOUD_CLUSTER:="$PLUGIN_GOOGLE_CLOUD_CLUSTER"}" 5 | : "${GOOGLE_CLOUD_ZONE:="$PLUGIN_GOOGLE_CLOUD_ZONE"}" 6 | : "${SSH_KEY:="$PLUGIN_SSH_KEY"}" 7 | : "${DOCKER_USERNAME:="$PLUGIN_DOCKER_USERNAME"}" 8 | : "${DOCKER_PASSWORD:="$PLUGIN_DOCKER_PASSWORD"}" 9 | : "${DOCKER_REGISTRY:="${PLUGIN_DOCKER_REGISTRY:-docker.io}"}" 10 | 11 | DOCKER_REGISTRY_HOST="$(echo "${DOCKER_REGISTRY}" | awk 'BEGIN{ FS="/" }{print $1}')" 12 | 13 | export PATH="$CI_WORKSPACE/bin:$PATH" 14 | 15 | require_param() { 16 | declare name="$1" 17 | local env_name 18 | env_name="$(echo "$name" | tr /a-z/ /A-Z/)" 19 | if [ -z "${!env_name}" ] ; then 20 | echo "You must define \"$name\" parameter or define $env_name environment variable" >&2 21 | exit 2 22 | fi 23 | } 24 | 25 | require_google_credentials() { 26 | if [ -z "$GOOGLE_CREDENTIALS" ] ; then 27 | echo "You must define \"google_credentials_file\" parameter or define GOOGLE_CREDENTIALS environment variable" >&2 28 | exit 2 29 | fi 30 | } 31 | 32 | run() { 33 | echo "+" "$@" 34 | "$@" 35 | } 36 | 37 | if [ -n 
"$DOCKER_PASSWORD" ] ; then 38 | require_param DOCKER_USERNAME 39 | echo "+ docker login $DOCKER_REGISTRY_HOST -u $DOCKER_USERNAME" 40 | echo "$DOCKER_PASSWORD" | docker login "$DOCKER_REGISTRY_HOST" -u "$DOCKER_USERNAME" --password-stdin 41 | fi 42 | 43 | if [ -n "$GOOGLE_CREDENTIALS" ] ; then 44 | echo "$GOOGLE_CREDENTIALS" > /run/google-credentials.json 45 | run gcloud auth activate-service-account --quiet --key-file=/run/google-credentials.json 46 | run gcloud auth configure-docker --quiet 47 | fi 48 | 49 | if [ -n "$GOOGLE_CLOUD_PROJECT" ] ; then 50 | run gcloud config set project "$GOOGLE_CLOUD_PROJECT" 51 | fi 52 | 53 | if [ -n "$GOOGLE_CLOUD_CLUSTER" ] ; then 54 | require_google_credentials 55 | require_param "google_cloud_project" 56 | require_param "google_cloud_zone" 57 | 58 | run gcloud container clusters get-credentials "$GOOGLE_CLOUD_CLUSTER" --project "$GOOGLE_CLOUD_PROJECT" --zone "$GOOGLE_CLOUD_ZONE" 59 | # Display kubernetees versions (usefull for debugging) 60 | run kubectl version 61 | fi 62 | 63 | if [ -n "$SSH_KEY" ] ; then 64 | require_param "home" 65 | test -d "$HOME/.ssh" || mkdir -p "$HOME/.ssh" 66 | echo "$SSH_KEY" > "$HOME/.ssh/id_rsa" 67 | chmod 0400 "$HOME/.ssh/id_rsa" 68 | echo "Installed ssh key into $HOME/.ssh/id_rsa" 69 | run ssh-keygen -y -f "$HOME/.ssh/id_rsa" 70 | fi 71 | 72 | if [[ -n "${GIT_USER}" && -n "${GIT_PASSWORD}" ]] ; then 73 | git config --global user.email "${GIT_EMAIL:-bot@presslabs.com}" 74 | git config --global user.name "${GIT_USER:-presslabs-bot}" 75 | 76 | cat <> ~/.netrc 77 | machine ${GIT_HOST:-github.com} 78 | login ${GIT_USER} 79 | password ${GIT_PASSWORD} 80 | EOF 81 | fi 82 | -------------------------------------------------------------------------------- /build/images/build/root/usr/local/bin/setup-github-credentials.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ -z "$GH_USER" ]; then 4 | echo "You must define \"GH_USER\" environment variable" >&2 5 | exit 2 6 | fi 7 | 8 | if [ -z "$GH_PASSWORD" ]; then 9 | echo "You must define \"GH_PASSWORD\" environment variable" >&2 10 | exit 2 11 | fi 12 | 13 | git config --global user.email ${GH_EMAIL:-no-reply@kluster.toolbox} 14 | git config --global user.name $GH_USER 15 | cat < ~/.netrc 16 | machine github.com 17 | login ${GH_USER} 18 | password ${GH_PASSWORD} 19 | EOF 20 | -------------------------------------------------------------------------------- /build/images/build/root/usr/local/bin/xvfb-chrome: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright 2016 The Upbound Authors. All rights reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
16 | 17 | _kill_procs() { 18 | kill -TERM $chrome 19 | wait $chrome 20 | } 21 | 22 | # Setup a trap to catch SIGTERM and relay it to child processes 23 | trap _kill_procs SIGTERM 24 | 25 | # Start Chrome inside xvfb 26 | xvfb-run -a -s "-screen 0 1920x1080x24 -nolisten tcp" /opt/google/chrome/chrome --no-sandbox $@ & 27 | chrome=$! 28 | 29 | wait $chrome 30 | -------------------------------------------------------------------------------- /build/makelib/cache.mk: -------------------------------------------------------------------------------- 1 | # Copyright 2019 Pressinfra SRL. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | ifndef __CACHE_MAKEFILE__ 16 | __CACHE_MAKEFILE__ := included 17 | 18 | RCLONE_BIN ?= RCLONE_VERSION=true rclone 19 | RCLONE_ARGS ?= -q --config /dev/null 20 | 21 | ifeq ($(CACHE_BACKEND),) 22 | $(error You must define CACHE_BACKEND before adding cache support. See format at https://rclone.org/docs/#backend-path-to-dir) 23 | endif 24 | 25 | CACHE_COMPRESSION ?= gzip 26 | 27 | ifneq ($(DRONE_PULL_REQUEST),) 28 | CACHE_NAME ?= $(PROJECT_NAME)-pr$(DRONE_PULL_REQUEST)-cache 29 | else ifneq ($(DRONE_TAG),) 30 | CACHE_NAME ?= $(PROJECT_NAME)-$(DRONE_TAG)-cache 31 | else 32 | CACHE_NAME ?= $(PROJECT_NAME)-$(BRANCH_NAME)-cache 33 | endif 34 | 35 | 36 | RCLONE := $(RCLONE_BIN) $(RCLONE_ARGS) 37 | 38 | ifeq ($(CACHE_COMPRESSION),gzip) 39 | TAR_COMPRESS_ARGS += -z 40 | CACHE_EXTENSION_SUFFIX := .gz 41 | endif 42 | 43 | CACHE_FILE := $(CACHE_NAME).tar$(CACHE_EXTENSION_SUFFIX) 44 | 45 | .PHONY: cache.store cache.restore 46 | 47 | cache.store: 48 | @$(INFO) storing cache $(CACHE_FILE) into $(CACHE_BACKEND) 49 | @$(RCLONE) mkdir $(CACHE_BACKEND) || $(FAIL) 50 | @tar -C $(CACHE_DIR) $(TAR_COMPRESS_ARGS) -cf - ./ | $(RCLONE) rcat $(CACHE_BACKEND)/$(CACHE_FILE) || $(FAIL) 51 | @$(OK) cache store 52 | 53 | cache.restore: |$(CACHE_DIR) 54 | @$(INFO) restoring cache from $(CACHE_BACKEND)/$(CACHE_FILE) 55 | @$(RCLONE) cat $(CACHE_BACKEND)/$(CACHE_FILE) | tar -C $(CACHE_DIR) $(TAR_COMPRESS_ARGS) -x \ 56 | && $(OK) cache restore \ 57 | || $(WARN) cache restore failed 58 | 59 | endif # __CACHE_MAKEFILE__ 60 | -------------------------------------------------------------------------------- /build/makelib/gcp.mk: -------------------------------------------------------------------------------- 1 | # Copyright 2019 Pressinfra SRL. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | ifndef __GOOGLE_CLOUD_MAKEFILE__ 16 | __GOOGLE_CLOUD_MAKEFILE__ := included 17 | 18 | ifeq ($(origin GOOGLE_CLOUD_PROJECT),undefined) 19 | ifneq ($(GCLOUD_PROJECT),) 20 | GOOGLE_CLOUD_PROJECT := $(GCLOUD_PROJECT) 21 | else 22 | GOOGLE_CLOUD_PROJECT := $(shell gcloud config get-value project) 23 | endif 24 | endif 25 | 26 | ifeq ($(GOOGLE_CLOUD_PROJECT),) 27 | $(error Could not determine current Google Cloud Project. Set the GOOGLE_CLOUD_PROJECT environment variable or set with `gcloud config`) 28 | else 29 | export GOOGLE_CLOUD_PROJECT 30 | endif 31 | 32 | endif # __GOOGLE_CLOUD_MAKEFILE__ 33 | -------------------------------------------------------------------------------- /build/makelib/gettext.mk: -------------------------------------------------------------------------------- 1 | # Copyright 2016 The Upbound Authors. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | ifndef __GETTEXT_MAKEFILE__ 16 | __GETTEXT_MAKEFILE__ := included 17 | 18 | # ==================================================================================== 19 | # Options 20 | 21 | # ==================================================================================== 22 | # Translations 23 | 24 | # The list of languages to generate translations for 25 | LANGUAGES ?= 26 | 27 | LOCALES_DIR ?= $(ROOT_DIR)/locales 28 | $(LOCALES_DIR): 29 | @mkdir -p $(LOCALES_DIR) 30 | 31 | ifeq ($(LANGUAGES),) 32 | $(error You must specify the LANGUAGES variable in order to handle translations) 33 | endif 34 | 35 | ifeq ($(HOSTOS),darwin) 36 | MSGFMT = /usr/local/opt/gettext/bin/msgfmt 37 | MSGMERGE = /usr/local/opt/gettext/bin/msgmerge 38 | else 39 | MSGFMT = msgfmt 40 | MSGMERGE = msgmerge 41 | endif 42 | 43 | PO_FILES := $(shell find $(LOCALES_DIR) -name '*.po') 44 | POT_FILES := $(shell find $(LOCALES_DIR) -mindepth 1 -maxdepth 1 -name '*.pot') 45 | 46 | # lint the code 47 | $(eval $(call common.target,translations)) 48 | 49 | gettext.lint: 50 | @$(INFO) msgfmt check 51 | $(foreach p,$(PO_FILES),@$(MSGFMT) -c $(p) || $(FAIL) ${\n}) 52 | @$(OK) msgfmt check 53 | 54 | .gettext.merge: 55 | @$(INFO) msgmerge 56 | $(foreach l,$(LANGUAGES),@mkdir -p $(LOCALES_DIR)/$(l) || $(FAIL) ${\n}) 57 | $(foreach pot,$(POT_FILES),$(foreach l,$(LANGUAGES), \ 58 | @touch $(LOCALES_DIR)/$(l)/$(basename $(notdir $(pot))).po || $(FAIL) ${\n} \ 59 | @$(MSGMERGE) -q --no-wrap --sort-output --no-fuzzy-matching --lang=$(l) -U "$(LOCALES_DIR)/$(l)/$(basename $(notdir $(pot))).po" "$(pot)" || $(FAIL) ${\n} \ 60 | )) 61 | @find $(LOCALES_DIR) -name '*.po~' -delete 62 | @find $(LOCALES_DIR) -name '*.pot~' -delete 63 | @$(OK) msgmerge 64 | 65 | .gettext.build: 66 | @$(INFO) copying translations 67 | @rm -rf $(OUTPUT_DIR)/locales 68 | @cp -a $(LOCALES_DIR) $(OUTPUT_DIR)/locales 69 | @$(OK) copying translations 70 | 71 | .PHONY: gettext.lint .gettext.build .gettext.merge 72 | 73 | 
# ==================================================================================== 74 | # Common Targets 75 | .lint.run: gettext.lint 76 | 77 | .translations.run: .gettext.merge 78 | 79 | .build.code: .gettext.build 80 | 81 | endif # __GETTEXT_MAKEFILE__ 82 | -------------------------------------------------------------------------------- /build/makelib/git-publish.mk: -------------------------------------------------------------------------------- 1 | # Copyright 2019 The Pressinfra Authors. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | ifndef __GIT_PUBLISH_MAKEFILE__ 16 | __GIT_PUBLISH_MAKEFILE__ := included 17 | 18 | # ==================================================================================== 19 | # Options 20 | 21 | PUBLISH_BRANCH ?= master 22 | PUBLISH_PREFIX ?= / 23 | PUBLISH_TAGS ?= true 24 | 25 | ifeq ($(PUBLISH_DIRS),) 26 | PUBLISH_DIR ?= $(CURDIR:$(abspath $(ROOT_DIR))/%=%) 27 | endif 28 | 29 | PUBLISH_WORK_BRANCH := build/split-$(COMMIT_HASH)/$(PUBLISH_DIR) 30 | PUBLISH_WORK_DIR := $(WORK_DIR)/git-publish/$(PUBLISH_DIR) 31 | PUBLISH_GIT := git -C $(PUBLISH_WORK_DIR) 32 | 33 | GIT_MERGE_ARGS ?= --ff-only 34 | GIT_SUBTREE_MERGE_ARGS ?= --squash 35 | ifeq ($(PUBLISH_TAGS),true) 36 | GIT_PUSH_ARGS := --follow-tags 37 | endif 38 | 39 | PUBLISH_DIRS ?= $(PUBLISH_DIR) 40 | 41 | # ==================================================================================== 42 | # git publish targets 43 | 44 | git.urlize = $(patsubst %,https://%,$(patsubst %.git,%,$(patsubst https://%,%,$(patsubst git@github.com:%,https://github.com/%,$(1))))) 45 | git.workbranch = build/split-$(COMMIT_HASH)/$(1) 46 | 47 | # 1 publish directory 48 | define git.publish 49 | 50 | $(ROOT_DIR)/.git/refs/heads/$(call git.workbranch,$(1)): 51 | @$(INFO) git subtree split $(1) 52 | @cd $(ROOT_DIR) && git subtree split -q -P $(1) -b $(call git.workbranch,$(1)) $(COMMIT_HASH) 53 | @$(OK) git subtree split $(1) 54 | .PHONY: .git.build.artifacts.$(1) 55 | .git.build.artifacts.$(1): $(ROOT_DIR)/.git/refs/heads/$(call git.workbranch,$(1)) 56 | .git.build.artifacts: .git.build.artifacts.$(1) 57 | 58 | .PHONY: .git.clean.$(1) 59 | .git.clean.$(1): 60 | @cd $(ROOT_DIR) && git branch -D $(call git.workbranch,$(1)) || true 61 | .git.clean: .git.clean.$(1) 62 | 63 | .PHONY: .do.git.publish.$(1) 64 | .do.git.publish.$(1): |$(ROOT_DIR)/.git/refs/heads/$(call git.workbranch,$(1)) 65 | @$(MAKE) -C $(1) .git.publish 66 | 67 | endef 68 | 69 | ifeq ($(filter-out $(PUBLISH_DIR),$(PUBLISH_DIRS)),) 70 | .git.publish: |$(ROOT_DIR)/.git/refs/heads/$(PUBLISH_WORK_BRANCH) 71 | @$(INFO) Publishing $(1) to $(PUBLISH_REPO)@$(PUBLISH_BRANCH) under $(PUBLISH_PREFIX) 72 | @rm -rf $(PUBLISH_WORK_DIR) && mkdir -p $(PUBLISH_WORK_DIR) 73 | @$(PUBLISH_GIT) init -q 74 | @$(PUBLISH_GIT) remote add origin $(PUBLISH_REPO) 75 | @$(PUBLISH_GIT) remote add upstream $(ROOT_DIR)/.git 76 | @$(PUBLISH_GIT) fetch -q upstream +refs/heads/$(PUBLISH_WORK_BRANCH): 77 | @$(PUBLISH_GIT) 
checkout -q -b $(PUBLISH_BRANCH) 78 | @set -e; cd $(PUBLISH_WORK_DIR); if git ls-remote --heads origin | grep -q refs/heads/$(PUBLISH_BRANCH); then \ 79 | $(PUBLISH_GIT) fetch -q origin +refs/heads/$(PUBLISH_BRANCH): ;\ 80 | $(PUBLISH_GIT) reset -q --hard origin/$(PUBLISH_BRANCH) ;\ 81 | $(PUBLISH_GIT) branch -q -u origin/$(PUBLISH_BRANCH) ;\ 82 | fi 83 | ifeq ($(PUBLISH_PREFIX),/) 84 | @set -e; \ 85 | $(PUBLISH_GIT) merge -q $(GIT_MERGE_ARGS) \ 86 | -m "Merge '$(PUBLISH_DIR)' from $(patsubst https://github.com/%,%,$(call git.urlize,$(REMOTE_URL)))@$(COMMIT_HASH)" \ 87 | upstream/$(PUBLISH_WORK_BRANCH) ;\ 88 | if [ "$(PUBLISH_TAGS)" == "true" ] ; then \ 89 | for t in $(TAGS) ; do \ 90 | $(PUBLISH_GIT) tag -a -m "$$t" $$t ;\ 91 | done ;\ 92 | fi 93 | else 94 | @set -e; \ 95 | if [ -d "$(PUBLISH_WORK_DIR)/$(PUBLISH_PREFIX)" ] ; then \ 96 | $(PUBLISH_GIT) subtree -q merge -P $(PUBLISH_PREFIX) $(GIT_SUBTREE_MERGE_ARGS) \ 97 | -m "Merge '$(PUBLISH_DIR)' from $(patsubst https://github.com/%,%,$(call git.urlize,$(REMOTE_URL)))@$(COMMIT_HASH)" \ 98 | upstream/$(PUBLISH_WORK_BRANCH) ;\ 99 | else \ 100 | $(PUBLISH_GIT) subtree add -q -P $(PUBLISH_PREFIX) $(GIT_SUBTREE_MERGE_ARGS) \ 101 | -m "Add '$(PUBLISH_DIR)' from $(patsubst https://github.com/%,%,$(call git.urlize,$(REMOTE_URL)))@$(COMMIT_HASH)" \ 102 | $(ROOT_DIR)/.git $(PUBLISH_WORK_BRANCH) ;\ 103 | fi 104 | endif 105 | @$(PUBLISH_GIT) push -u origin $(GIT_PUSH_ARGS) $(PUBLISH_BRANCH) 106 | @$(OK) Published $(1) to $(PUBLISH_REPO)@$(PUBLISH_BRANCH) 107 | else 108 | .git.publish: $(foreach d,$(PUBLISH_DIRS),.do.git.publish.$(d)) 109 | endif 110 | 111 | $(foreach d,$(PUBLISH_DIRS), $(eval $(call git.publish,$(d)))) 112 | 113 | .PHONY: .git.clean .git.build.artifacts .git.publish 114 | 115 | # ==================================================================================== 116 | # Common Targets 117 | 118 | # if PUBLISH_DIRS is defined the invoke publish for each dir 119 | ifneq ($(filter-out $(PUBLISH_DIR),$(PUBLISH_DIRS)),) 120 | 121 | .publish.init: .git.build.artifacts 122 | clean: .git.clean 123 | 124 | # only publish for master and release branches 125 | # also, if publishing for tags is enabled, 126 | # publish if the current commit is a tag 127 | ifneq ($(filter master release-%,$(BRANCH_NAME)),) 128 | .publish.run: $(addprefix .do.git.publish.,$(PUBLISH_DIRS)) 129 | else ifeq ($(PUBLISH_TAGS),true) 130 | ifneq ($(TAGS),) 131 | .publish.run: $(addprefix .do.git.publish.,$(PUBLISH_DIRS)) 132 | endif 133 | endif 134 | 135 | else # assume this .mk file is being included for a single dir 136 | 137 | ifeq ($(PUBLISH_REPO),) 138 | $(error You must specify the PUBLISH_REPO variable in order to handle git publishing) 139 | endif 140 | 141 | .publish.init: .git.build.artifacts 142 | clean: .git.clean 143 | 144 | endif # PUBLISH_DIRS 145 | 146 | 147 | endif # __GIT_PUBLISH_MAKEFILE__ 148 | -------------------------------------------------------------------------------- /build/makelib/helm.mk: -------------------------------------------------------------------------------- 1 | # Copyright 2016 The Upbound Authors. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | ifndef __HELM_MAKEFILE__ 16 | __HELM_MAKEFILE__ := included 17 | 18 | include $(COMMON_SELF_DIR)/k8s-tools.mk 19 | 20 | # the charts directory 21 | HELM_CHARTS_DIR ?= deploy/charts 22 | 23 | HELM_CHARTS ?= $(patsubst $(HELM_CHARTS_DIR)/%,%,$(shell find $(HELM_CHARTS_DIR) -mindepth 1 -maxdepth 1 -type d)) 24 | 25 | # the base url where helm charts are published 26 | # ifeq ($(HELM_BASE_URL),) 27 | # $(error the variable HELM_BASE_URL must be set prior to including helm.mk) 28 | # endif 29 | 30 | # the charts output directory 31 | HELM_OUTPUT_DIR ?= $(OUTPUT_DIR)/charts 32 | 33 | # the helm index file 34 | HELM_INDEX := $(HELM_OUTPUT_DIR)/index.yaml 35 | 36 | # helm home 37 | HELM_HOME := $(abspath $(WORK_DIR)/helm) 38 | HELM_CHARTS_WORK_DIR := $(abspath $(WORK_DIR)/charts) 39 | export HELM_HOME 40 | 41 | # remove the leading `v` for helm chart versions 42 | HELM_CHART_VERSION := $(VERSION:v%=%) 43 | HELM_APP_VERSION ?= $(VERSION) 44 | 45 | # ==================================================================================== 46 | # Tools install targets 47 | 48 | HELM_VERSION := 3.8.2 49 | HELM_DOWNLOAD_URL := https://get.helm.sh/helm-v$(HELM_VERSION)-$(HOSTOS)-$(HOSTARCH).tar.gz 50 | $(eval $(call tool.download.tar.gz,helm,$(HELM_VERSION),$(HELM_DOWNLOAD_URL))) 51 | 52 | # ==================================================================================== 53 | # Helm Targets 54 | 55 | $(HELM_HOME): $(HELM) 56 | @mkdir -p $(HELM_HOME) 57 | 58 | $(HELM_OUTPUT_DIR): 59 | @mkdir -p $(HELM_OUTPUT_DIR) 60 | 61 | $(HELM_CHARTS_WORK_DIR): 62 | @mkdir -p $(HELM_CHARTS_WORK_DIR) 63 | 64 | define helm.chart 65 | 66 | .helm.package.init.$(1): $(HELM_CHARTS_WORK_DIR) 67 | @rm -rf $(HELM_CHARTS_WORK_DIR)/$(1) 68 | @cp -a $(abspath $(HELM_CHARTS_DIR)/$(1)) $(HELM_CHARTS_WORK_DIR)/$(1) 69 | .helm.package.run.$(1): $(HELM_OUTPUT_DIR) $(HELM_HOME) 70 | @$(INFO) helm package $(1) $(HELM_CHART_VERSION) 71 | @$(HELM) package --version $(HELM_CHART_VERSION) --app-version $(HELM_APP_VERSION) -d $(HELM_OUTPUT_DIR) $(HELM_CHARTS_WORK_DIR)/$(1) 72 | @$(OK) helm package $(1) $(HELM_CHART_VERSION) 73 | .helm.package.done.$(1): ; @: 74 | .helm.package.$(1): 75 | @$(MAKE) .helm.package.init.$(1) 76 | @$(MAKE) .helm.package.run.$(1) 77 | @$(MAKE) .helm.package.done.$(1) 78 | 79 | .PHONY: .helm.package.init.$(1) .helm.package.run.$(1) .helm.package.done.$(1) .helm.package.$(1) 80 | 81 | $(HELM_OUTPUT_DIR)/$(1)-$(HELM_CHART_VERSION).tgz: $(HELM_HOME) $(HELM_OUTPUT_DIR) $(shell find $(HELM_CHARTS_DIR)/$(1) -type f) 82 | 83 | .PHONY: .helm.lint.$(1) 84 | .helm.lint.$(1): $(HELM_HOME) .helm.dep.$(1) 85 | @$(INFO) helm lint $(1) 86 | @rm -rf $(abspath $(HELM_CHARTS_DIR)/$(1)/charts) 87 | @$(HELM) dependency build $(abspath $(HELM_CHARTS_DIR)/$(1)) 88 | @$(HELM) lint $(abspath $(HELM_CHARTS_DIR)/$(1)) $(HELM_CHART_LINT_ARGS_$(1)) --strict || $$(FAIL) 89 | @$(OK) helm lint $(1) 90 | 91 | helm.lint: .helm.lint.$(1) 92 | 93 | .PHONY: .helm.dep.$(1) 94 | .helm.dep.$(1): $(HELM_HOME) 95 | @$(INFO) helm dep $(1) $(HELM_CHART_VERSION) 96 | @$(HELM) dependency update $(abspath 
$(HELM_CHARTS_DIR)/$(1)) 97 | @$(OK) helm dep $(1) $(HELM_CHART_VERSION) 98 | 99 | helm.dep: .helm.dep.$(1) 100 | 101 | $(HELM_INDEX): .helm.package.$(1) 102 | endef 103 | $(foreach p,$(HELM_CHARTS),$(eval $(call helm.chart,$(p)))) 104 | 105 | $(HELM_INDEX): $(HELM_HOME) $(HELM_OUTPUT_DIR) 106 | @$(INFO) helm index 107 | @$(HELM) repo index $(HELM_OUTPUT_DIR) 108 | @$(OK) helm index 109 | 110 | helm.build: $(HELM_INDEX) 111 | 112 | .helm.clean: 113 | @rm -fr $(HELM_OUTPUT_DIR) 114 | 115 | .PHONY: helm.lint helm.build helm.dep .helm.clean 116 | 117 | # ==================================================================================== 118 | # Common Targets 119 | 120 | .build.check: helm.lint 121 | .build.artifacts: helm.build 122 | .lint.run: helm.lint 123 | clean: .helm.clean 124 | 125 | # ==================================================================================== 126 | # Special Targets 127 | 128 | define HELM_HELPTEXT 129 | Helm Targets: 130 | helm.dep Upgrade charts dependencies 131 | 132 | endef 133 | export HELM_HELPTEXT 134 | 135 | .PHONY: .helm.help 136 | .helm.help: 137 | @echo "$$HELM_HELPTEXT" 138 | 139 | .help: .helm.help 140 | 141 | endif # __HELM_MAKEFILE__ 142 | -------------------------------------------------------------------------------- /build/makelib/image.mk: -------------------------------------------------------------------------------- 1 | # Copyright 2016 The Upbound Authors. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # ==================================================================================== 16 | # Options 17 | 18 | ifndef __DOCKER_MAKEFILE__ 19 | __DOCKER_MAKEFILE__ := included 20 | 21 | ifeq ($(origin IMAGE_DIR),undefined) 22 | IMAGE_DIR := $(ROOT_DIR)/images 23 | endif 24 | 25 | ifeq ($(origin IMAGE_OUTPUT_DIR),undefined) 26 | IMAGE_OUTPUT_DIR := $(OUTPUT_DIR)/images/$(PLATFORM) 27 | endif 28 | 29 | ifeq ($(origin IMAGE_TEMP_DIR),undefined) 30 | IMAGE_TEMP_DIR := $(shell mktemp -d) 31 | endif 32 | 33 | ifeq ($(DRONE),true) 34 | DOCKER_HOST ?= tcp://docker:2375 35 | export DOCKER_HOST 36 | endif 37 | 38 | # a registry that is scoped to the current build tree on this host. this enables 39 | # us to have isolation between concurrent builds on the same system, as in the case 40 | # of multiple working directories or on a CI system with multiple executors. All images 41 | # tagged with this build registry can safely be untagged/removed at the end of the build. 42 | ifeq ($(origin BUILD_REGISTRY), undefined) 43 | ifeq ($(CI_BUILD_NUMBER),) 44 | BUILD_REGISTRY := build/$(shell echo $(HOSTNAME)-$(ROOT_DIR) | shasum -a 256 | cut -c1-8) 45 | else 46 | BUILD_REGISTRY := build/$(CI_BUILD_NUMBER) 47 | endif 48 | endif 49 | 50 | # In order to reduce built time especially on jenkins, we maintain a cache 51 | # of already built images. This cache contains images that can be used to help speed 52 | # future docker build commands using docker's content addressable schemes. 
53 | # All cached images go in in a 'cache/' local registry and we follow an MRU caching 54 | # policy -- keeping images that have been referenced around and evicting images 55 | # that have to been referenced in a while (and according to a policy). Note we can 56 | # not rely on the image's .CreatedAt date since docker only updates then when the 57 | # image is created and not referenced. Instead we keep a date in the Tag. 58 | CACHE_REGISTRY := cache 59 | 60 | # prune images that are at least this many hours old 61 | PRUNE_HOURS ?= 48 62 | 63 | # prune keeps at least this many images regardless of how old they are 64 | PRUNE_KEEP ?= 24 65 | 66 | # don't actually prune just show what prune would do. 67 | PRUNE_DRYRUN ?= 0 68 | 69 | # the cached image format 70 | CACHE_DATE_FORMAT := "%Y-%m-%d.%H%M%S" 71 | CACHE_PRUNE_DATE := $(shell export TZ="UTC+$(PRUNE_HOURS)"; date +"$(CACHE_DATE_FORMAT)") 72 | CACHE_TAG := $(shell date -u +"$(CACHE_DATE_FORMAT)") 73 | 74 | REGISTRIES ?= $(DOCKER_REGISTRY) 75 | 76 | # docker accepted image platform format 77 | # eg linux/arm64 -> linux/arm64/v8, linux/armv7 -> linux/arm/v7, linux/armv6 -> linux/arm/v6 78 | dockerify-platform = $(subst armv7,arm/v7,$(subst arm64,arm64/v8,$(1))) 79 | IMAGE_ARCHS := $(subst linux_,,$(filter linux_%,$(PLATFORMS))) 80 | IMAGE_PLATFORMS := $(call dockerify-platform,$(subst _,/,$(filter linux_%,$(PLATFORMS)))) 81 | IMAGE_PLATFORM = $(call dockerify-platform,linux/$(ARCH)) 82 | 83 | IMAGE_TAG ?= $(subst +,-,$(VERSION)) 84 | PROMOTE_TAG := $(if $(PROMOTE_IMAGE_TAG),$(PROMOTE_IMAGE_TAG),$(IMAGE_TAG)) 85 | 86 | # if set to 1 docker image caching will not be used. 87 | CACHEBUST ?= 0 88 | ifeq ($(CACHEBUST),1) 89 | BUILD_ARGS += --no-cache 90 | endif 91 | 92 | # if V=0 avoid showing verbose output from docker build 93 | ifeq ($(V),0) 94 | BUILD_ARGS ?= -q 95 | endif 96 | 97 | # if PULL=1 we will always check if there is a newer base image 98 | PULL ?= 1 99 | ifeq ($(PULL),1) 100 | BUILD_BASE_ARGS += --pull 101 | endif 102 | BUILD_BASE_ARGS += $(BUILD_ARGS) 103 | export PULL 104 | 105 | ifeq ($(HOSTOS),Linux) 106 | SELF_CID := $(shell cat /proc/self/cgroup | grep docker | grep -o -E '[0-9a-f]{64}' | head -n 1) 107 | endif 108 | 109 | # ===================================================================================== 110 | # Image Targets 111 | 112 | .do.img.clean: 113 | @for i in $(CLEAN_IMAGES); do \ 114 | if [ -n "$$(docker images -q $$i)" ]; then \ 115 | for c in $$(docker ps -a -q --no-trunc --filter=ancestor=$$i); do \ 116 | if [ "$$c" != "$(SELF_CID)" ]; then \ 117 | $(INFO) stopping and removing container $${c} referencing image $$i; \ 118 | docker stop $${c}; \ 119 | docker rm $${c}; \ 120 | fi; \ 121 | done; \ 122 | $(INFO) cleaning image $$i; \ 123 | docker rmi $$i > /dev/null 2>&1 || true; \ 124 | fi; \ 125 | done 126 | 127 | # this will clean everything for this build 128 | .img.clean: 129 | @$(INFO) cleaning images for $(BUILD_REGISTRY) 130 | @$(MAKE) .do.img.clean CLEAN_IMAGES="$(shell docker images | grep -E '^$(BUILD_REGISTRY)/' | awk '{print $$1":"$$2}')" 131 | @$(OK) cleaning images for $(BUILD_REGISTRY) 132 | 133 | .img.done: 134 | @rm -fr $(IMAGE_TEMP_DIR) 135 | 136 | .img.cache: 137 | @for i in $(CACHE_IMAGES); do \ 138 | IMGID=$$(docker images -q $$i); \ 139 | if [ -n "$$IMGID" ]; then \ 140 | $(INFO) caching image $$i; \ 141 | CACHE_IMAGE=$(CACHE_REGISTRY)/$${i#*/}; \ 142 | docker tag $$i $${CACHE_IMAGE}:$(CACHE_TAG); \ 143 | for r in $$(docker images --format "{{.ID}}#{{.Repository}}:{{.Tag}}" | grep 
$$IMGID | grep $(CACHE_REGISTRY)/ | grep -v $${CACHE_IMAGE}:$(CACHE_TAG)); do \ 144 | docker rmi $${r#*#} > /dev/null 2>&1 || true; \ 145 | done; \ 146 | fi; \ 147 | done 148 | 149 | # prune removes old cached images 150 | img.prune: 151 | @$(INFO) pruning images older than $(PRUNE_HOURS) keeping a minimum of $(PRUNE_KEEP) images 152 | @EXPIRED=$$(docker images --format "{{.Tag}}#{{.Repository}}:{{.Tag}}" \ 153 | | grep -E '$(CACHE_REGISTRY)/' \ 154 | | sort -r \ 155 | | awk -v i=0 -v cd="$(CACHE_PRUNE_DATE)" -F "#" '{if ($$1 <= cd && i >= $(PRUNE_KEEP)) print $$2; i++ }') &&\ 156 | for i in $$EXPIRED; do \ 157 | $(INFO) removing expired cache image $$i; \ 158 | [ $(PRUNE_DRYRUN) = 1 ] || docker rmi $$i > /dev/null 2>&1 || true; \ 159 | done 160 | @for i in $$(docker images -q -f dangling=true); do \ 161 | $(INFO) removing dangling image $$i; \ 162 | docker rmi $$i > /dev/null 2>&1 || true; \ 163 | done 164 | @$(OK) pruning 165 | 166 | debug.nuke: 167 | @for c in $$(docker ps -a -q --no-trunc); do \ 168 | if [ "$$c" != "$(SELF_CID)" ]; then \ 169 | $(INFO) stopping and removing container $${c}; \ 170 | docker stop $${c}; \ 171 | docker rm $${c}; \ 172 | fi; \ 173 | done 174 | @for i in $$(docker images -q); do \ 175 | $(INFO) removing image $$i; \ 176 | docker rmi -f $$i > /dev/null 2>&1; \ 177 | done 178 | 179 | 180 | # 1: registry 2: image, 3: arch 181 | define repo.targets 182 | .PHONY: .img.release.build.$(1).$(2).$(3) 183 | .img.release.build.$(1).$(2).$(3): 184 | @$(INFO) docker build $(1)/$(2):$(IMAGE_TAG)-$(3) 185 | @docker tag $(BUILD_REGISTRY)/$(2)-$(3) $(1)/$(2):$(IMAGE_TAG)-$(3) || $(FAIL) 186 | @$(OK) docker build $(1)/$(2):$(IMAGE_TAG)-$(3) 187 | .img.release.build: .img.release.build.$(1).$(2).$(3) 188 | 189 | .PHONY: .img.release.publish.$(1).$(2).$(3) 190 | .img.release.publish.$(1).$(2).$(3): 191 | @$(INFO) docker push $(1)/$(2):$(IMAGE_TAG)-$(3) 192 | @docker push $(1)/$(2):$(IMAGE_TAG)-$(3) || $(FAIL) 193 | @$(OK) docker push $(1)/$(2):$(IMAGE_TAG)-$(3) 194 | .img.release.publish: .img.release.publish.$(1).$(2).$(3) 195 | 196 | .PHONY: .img.release.promote.$(1).$(2).$(3) 197 | .img.release.promote.$(1).$(2).$(3): 198 | @$(INFO) docker promote $(1)/$(2):$(IMAGE_TAG)-$(3) to $(1)/$(2):$(CHANNEL)-$(PROMOTE_TAG)-$(3) 199 | 200 | @[ "$(CHANNEL)" = "master" ] ||\ 201 | docker buildx imagetools create \ 202 | --tag $(1)/$(2):$(CHANNEL)-$(PROMOTE_TAG)-$(3) \ 203 | $(1)/$(2):$(IMAGE_TAG)-$(3) || $(FAIL) 204 | 205 | @$(OK) docker promote 206 | .img.release.promote: .img.release.promote.$(1).$(2).$(3) 207 | 208 | .PHONY: .img.release.clean.$(1).$(2).$(3) 209 | .img.release.clean.$(1).$(2).$(3): 210 | @[ -z "$$$$(docker images -q $(1)/$(2):$(IMAGE_TAG)-$(3))" ] || docker rmi $(1)/$(2):$(IMAGE_TAG)-$(3) 211 | .img.release.clean: .img.release.clean.$(1).$(2).$(3) 212 | endef 213 | $(foreach r,$(REGISTRIES), $(foreach i,$(IMAGES), $(foreach a,$(IMAGE_ARCHS),$(eval $(call repo.targets,$(r),$(i),$(a)))))) 214 | 215 | 216 | .PHONY: .img.release.manifest.publish.% 217 | .img.release.manifest.publish.%: .img.release.publish 218 | @$(INFO) docker buildx imagetools create \ 219 | --tag $(DOCKER_REGISTRY)/$*:$(IMAGE_TAG) \ 220 | $(patsubst %,$(DOCKER_REGISTRY)/$*:$(IMAGE_TAG)-%,$(IMAGE_ARCHS)) 221 | 222 | @docker buildx imagetools create \ 223 | --tag $(DOCKER_REGISTRY)/$*:$(IMAGE_TAG) \ 224 | $(patsubst %,$(DOCKER_REGISTRY)/$*:$(IMAGE_TAG)-%,$(IMAGE_ARCHS)) || $(FAIL) 225 | 226 | @$(OK) docker buildx imagetools create 227 | 228 | .PHONY: .img.release.manifest.promote.% 229 | 
.img.release.manifest.promote.%: .img.release.promote 230 | @$(INFO) docker buildx imagetools create --tag $(DOCKER_REGISTRY)/$*:$(CHANNEL) $(patsubst %,$(DOCKER_REGISTRY)/$*:$(IMAGE_TAG)-%,$(IMAGE_ARCHS)) 231 | @[ "$(CHANNEL)" = "master" ] ||\ 232 | docker buildx imagetools create \ 233 | --tag $(DOCKER_REGISTRY)/$*:$(CHANNEL)-$(PROMOTE_TAG) \ 234 | $(patsubst %,$(DOCKER_REGISTRY)/$*:$(IMAGE_TAG)-%,$(IMAGE_ARCHS)) || $(FAIL) 235 | 236 | @# Republish images to the PROMOTE_TAG when promoting on stable channel 237 | @[ "$(CHANNEL)" != "stable" ] || [ "$(PROMOTE_TAG)" = "$(IMAGE_TAG)" ] ||\ 238 | docker buildx imagetools create \ 239 | --tag $(DOCKER_REGISTRY)/$*:$(PROMOTE_TAG) \ 240 | $(patsubst %,$(DOCKER_REGISTRY)/$*:$(IMAGE_TAG)-%,$(IMAGE_ARCHS)) || $(FAIL) 241 | 242 | @docker buildx imagetools create \ 243 | --tag $(DOCKER_REGISTRY)/$*:$(CHANNEL) \ 244 | $(patsubst %,$(DOCKER_REGISTRY)/$*:$(IMAGE_TAG)-%,$(IMAGE_ARCHS)) || $(FAIL) 245 | 246 | @$(OK) docker buildx imagetools create 247 | 248 | .img.release.build: ;@ 249 | .img.release.publish: ;@ 250 | .img.release.promote: ;@ 251 | .img.release.clean: ;@ 252 | 253 | .PHONY: img.prune .img.done .img.clean .do.img.clean .img.release.build .img.release.publish .img.release.promote 254 | .PHONY: .img.release.clean .img.cache img.publish 255 | 256 | # ==================================================================================== 257 | # Common Targets 258 | 259 | # if IMAGES is defined then invoke and build each image identified 260 | ifneq ($(IMAGES),) 261 | 262 | ifeq ($(DOCKER_REGISTRY),) 263 | $(error the variable DOCKER_REGISTRY must be set prior to including image.mk) 264 | endif 265 | 266 | .do.build.image.%: 267 | ifeq ($(filter linux_%,$(PLATFORM)),) 268 | @$(WARN) skipping docker build for $* on PLATFORM=$(PLATFORM) 269 | else 270 | @$(MAKE) -C $(IMAGE_DIR)/$* PLATFORM=$(PLATFORM) 271 | endif 272 | 273 | .do.build.images: $(foreach i,$(IMAGES), .do.build.image.$(i)) ; 274 | .build.artifacts.platform: .do.build.images 275 | .build.done: .img.cache .img.done 276 | clean: .img.clean .img.release.clean 277 | 278 | .publish.init: .img.release.build 279 | 280 | img.publish: $(addprefix .img.release.manifest.publish.,$(IMAGES)) 281 | 282 | # only publish images for master and release branches 283 | ifneq ($(filter master release-%,$(BRANCH_NAME)),) 284 | .publish.run: img.publish 285 | endif 286 | 287 | # publish images at tag also 288 | ifneq ($(TAGS),) 289 | .publish.run: img.publish 290 | endif 291 | 292 | 293 | .promote.run: $(addprefix .img.release.manifest.promote.,$(IMAGES)) 294 | 295 | else # assume this .mk file is being included to build a single image 296 | 297 | ifeq ($(PLATFORM),darwin_amd64) # when building docker image on macOS pretend we are building for linux 298 | PLATFORM := linux_amd64 299 | endif 300 | 301 | ifneq ($(filter $(PLATFORM),$(PLATFORMS)),) 302 | .build.artifacts.platform: img.build 303 | .build.done: .img.cache .img.done 304 | clean: .img.clean 305 | else # trying to build a docker image for an invalid platform 306 | .DEFAULT_GOAL := .skip 307 | .PHONY: .skip 308 | .skip: 309 | @$(WARN) skipping docker build for $(IMAGE) for PLATFORM=$(PLATFORM) 310 | endif 311 | 312 | endif 313 | 314 | # ==================================================================================== 315 | # Special Targets 316 | 317 | define IMAGE_HELPTEXT 318 | Image Targets: 319 | img.prune Prune orphaned and cached images. 
320 | 321 | Image Options: 322 | PRUNE_HOURS The number of hours from when an image is last used 323 | for it to be considered a target for pruning. 324 | Default is 48 hours. 325 | PRUNE_KEEP The minimum number of cached images to keep. 326 | Default is 24 images. 327 | 328 | endef 329 | export IMAGE_HELPTEXT 330 | 331 | .img.help: 332 | @echo "$$IMAGE_HELPTEXT" 333 | 334 | .help: .img.help 335 | 336 | .PHONY: .img.help 337 | 338 | endif # __DOCKER_MAKEFILE__ 339 | -------------------------------------------------------------------------------- /build/makelib/k8s-tools.mk: -------------------------------------------------------------------------------- 1 | # Copyright 2016 The Upbound Authors. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | ifndef __K8S_TOOLS_MAKEFILE__ 16 | __K8S_TOOLS_MAKEFILE__ := included 17 | 18 | # ==================================================================================== 19 | # tools 20 | 21 | # kubectl download and install 22 | KUBECTL_VERSION ?= 1.26.4 23 | KUBECTL_DOWNLOAD_URL ?= https://storage.googleapis.com/kubernetes-release/release/v$(KUBECTL_VERSION)/bin/$(HOSTOS)/$(HOSTARCH)/kubectl 24 | $(eval $(call tool.download,kubectl,$(KUBECTL_VERSION),$(KUBECTL_DOWNLOAD_URL))) 25 | 26 | # kind download and install 27 | KIND_VERSION ?= 0.12.0 28 | KIND_DOWNLOAD_URL ?= https://github.com/kubernetes-sigs/kind/releases/download/v$(KIND_VERSION)/kind-$(HOSTOS)-$(HOSTARCH) 29 | $(eval $(call tool.download,kind,$(KIND_VERSION),$(KIND_DOWNLOAD_URL))) 30 | 31 | # kind download and install 32 | KUSTOMIZE_VERSION ?= 4.5.4 33 | KUSTOMIZE_DOWNLOAD_URL ?=https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/v$(KUSTOMIZE_VERSION)/kustomize_v$(KUSTOMIZE_VERSION)_$(HOST_PLATFORM).tar.gz 34 | $(eval $(call tool.download.tar.gz,kustomize,$(KUSTOMIZE_VERSION),$(KUSTOMIZE_DOWNLOAD_URL),kustomize,0)) 35 | 36 | endif # __K8S_TOOLS_MAKEFILE__ 37 | 38 | -------------------------------------------------------------------------------- /build/makelib/kubebuilder.mk: -------------------------------------------------------------------------------- 1 | # Copyright 2019 Pressinfra Authors. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
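# Consumer sketch (hypothetical paths; only the variables defined further down are
# real knobs): a project Makefile typically overrides the ?= defaults and then
# includes this file, e.g.
#
#   CRD_DIR := config/crd/bases
#   BOILERPLATE_FILE := hack/boilerplate.go.txt
#   include build/makelib/kubebuilder.mk
#
# after which the manifest and DeepCopy generation below is wired into the common
# generate/test targets of this makelib via the .generate.* and .test.* hooks.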
14 | 15 | ifndef __KUBEBUILDER_MAKEFILE__ 16 | __KUBEBUILDER_MAKEFILE__ := included 17 | 18 | include $(COMMON_SELF_DIR)/golang.mk 19 | 20 | # ==================================================================================== 21 | # Options 22 | 23 | CRD_DIR ?= config/crd/bases 24 | RBAC_DIR ?= config/rbac 25 | 26 | BOILERPLATE_FILE ?= hack/boilerplate.go.txt 27 | 28 | CONTROLLER_GEN_CRD_OPTIONS ?= crd:generateEmbeddedObjectMeta=true output:crd:artifacts:config=$(CRD_DIR) 29 | CONTROLLER_GEN_RBAC_OPTIONS ?= rbac:roleName=manager-role output:rbac:artifacts:config=$(RBAC_DIR) 30 | CONTROLLER_GEN_WEBHOOK_OPTIONS ?= webhook 31 | CONTROLLER_GEN_OBJECT_OPTIONS ?= object:headerFile=$(BOILERPLATE_FILE) 32 | CONTROLLER_GEN_PATHS ?= $(foreach t,$(GO_SUBDIRS),paths=./$(t)/...) 33 | 34 | KUBEBUILDER_ASSETS_VERSION ?= 1.26.1 35 | KUBEBUILDER_ASSETS = $(CACHE_DIR)/kubebuilder/k8s/$(KUBEBUILDER_ASSETS_VERSION)-$(HOSTOS)-$(HOSTARCH) 36 | export KUBEBUILDER_ASSETS 37 | 38 | # ==================================================================================== 39 | # tools 40 | 41 | # setup-envtest download and install 42 | SETUP_ENVTEST_VERSION ?= 0.0.0-20220808123420-bcde6f084dd1 43 | SETUP_ENVTEST_DOWNLOAD_URL ?= sigs.k8s.io/controller-runtime/tools/setup-envtest 44 | $(eval $(call tool.go.install,setup-envtest,v$(SETUP_ENVTEST_VERSION),$(SETUP_ENVTEST_DOWNLOAD_URL))) 45 | 46 | # kubebuilder download and install 47 | KUBEBUILDER_VERSION ?= 3.10.0 48 | KUBEBUILDER_DOWNLOAD_URL ?= https://github.com/kubernetes-sigs/kubebuilder/releases/download/v$(KUBEBUILDER_VERSION)/kubebuilder_$(HOST_PLATFORM) 49 | $(eval $(call tool.download,kubebuilder,$(KUBEBUILDER_VERSION),$(KUBEBUILDER_DOWNLOAD_URL))) 50 | 51 | # controller-gen download and install 52 | CONTROLLER_GEN_VERSION ?= 0.17.1 53 | CONTROLLER_GEN_DOWNLOAD_URL ?= sigs.k8s.io/controller-tools/cmd/controller-gen 54 | $(eval $(call tool.go.install,controller-gen,v$(CONTROLLER_GEN_VERSION),$(CONTROLLER_GEN_DOWNLOAD_URL))) 55 | 56 | build.tools: |$(KUBEBUILDER_ASSETS) 57 | $(KUBEBUILDER_ASSETS): $(SETUP_ENVTEST) 58 | @echo ${TIME} ${BLUE}[TOOL]${CNone} installing kubebuilder assets for Kubernetes $(KUBEBUILDER_ASSETS_VERSION) 59 | @$(SETUP_ENVTEST) --bin-dir=$(CACHE_DIR)/kubebuilder --os=$(HOSTOS) --arch=$(HOSTARCH) use $(KUBEBUILDER_ASSETS_VERSION) 60 | @$(OK) installing kubebuilder assets for Kubernetes $(KUBEBUILDER_ASSETS_VERSION) 61 | 62 | # ==================================================================================== 63 | # Kubebuilder Targets 64 | 65 | $(eval $(call common.target,kubebuilder.manifests)) 66 | # Generate manifests e.g. CRD, RBAC etc. 67 | .do.kubebuilder.manifests: $(CONTROLLER_GEN) 68 | @$(INFO) Generating Kubernetes \(CRDs, RBAC, WebhookConfig, etc.\) manifests 69 | @$(CONTROLLER_GEN) \ 70 | $(CONTROLLER_GEN_CRD_OPTIONS) \ 71 | $(CONTROLLER_GEN_RBAC_OPTIONS) \ 72 | $(CONTROLLER_CONTROLLER_GEN_WEBHOOK_OPTIONS) \ 73 | $(CONTROLLER_GEN_PATHS) 74 | @$(OK) Generating Kubernetes \(CRDs, RBAC, WebhookConfig, etc.\) manifests 75 | .PHONY: .do.kubebuilder.manifests 76 | .kubebuilder.manifests.run: .do.kubebuilder.manifests 77 | 78 | $(eval $(call common.target,kubebuilder.code)) 79 | # Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. 
80 | .do.kubebuilder.code: $(CONTROLLER_GEN) 81 | @$(INFO) Generating DeepCopy, DeepCopyInto, and DeepCopyObject code 82 | @$(CONTROLLER_GEN) \ 83 | $(CONTROLLER_GEN_OBJECT_OPTIONS) \ 84 | $(CONTROLLER_GEN_PATHS) 85 | @$(OK) Generating DeepCopy, DeepCopyInto, and DeepCopyObject code 86 | .PHONY: .do.kubebuilder.code 87 | .kubebuilder.code.run: .do.kubebuilder.code 88 | 89 | # ==================================================================================== 90 | # Common Targets 91 | 92 | .test.init: |$(KUBEBUILDER_ASSETS) 93 | go.test.unit: |$(KUBEBUILDER_ASSETS) 94 | go.generate: kubebuilder.code 95 | .generate.init: .kubebuilder.manifests.init 96 | .generate.run: .kubebuilder.manifests.run 97 | .generate.done: .kubebuilder.manifests.done 98 | 99 | # ==================================================================================== 100 | # Special Targets 101 | 102 | define KUBEBULDER_HELPTEXT 103 | Kubebuilder Targets: 104 | kubebuilder.manifests Generates Kubernetes custom resources manifests (e.g. CRDs RBACs, ...) 105 | kubebuilder.code Generates DeepCopy, DeepCopyInto, and DeepCopyObject code 106 | 107 | endef 108 | export KUBEBULDER_HELPTEXT 109 | 110 | .kubebuilder.help: 111 | @echo "$$KUBEBULDER_HELPTEXT" 112 | 113 | .help: .kubebuilder.help 114 | .PHONY: .kubebuilder.help 115 | 116 | endif # __KUBEBUILDER_MAKEFILE__ -------------------------------------------------------------------------------- /build/makelib/nodejs.mk: -------------------------------------------------------------------------------- 1 | # Copyright 2016 The Upbound Authors. All rights reserved. 2 | # Copyright 2019 The Pressinfra Authors. All rights reserved. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | ifndef __NODEJS_MAKEFILE__ 17 | __NODEJS_MAKEFILE__ := included 18 | 19 | # ==================================================================================== 20 | # Options 21 | 22 | # supported node versions 23 | NODE_SUPPORTED_VERSIONS ?= 10|12 24 | NODE := node 25 | 26 | SELF_DIR := $(dir $(lastword $(MAKEFILE_LIST))) 27 | 28 | # The location of node application within this git repo. 29 | NODE_ROOT_DIR ?= $(SELF_DIR)/../.. 
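# Hypothetical override (the defaults here and just below normally suffice): a
# repository keeping its frontend in a subdirectory can point the makelib at it
# before including nodejs.mk, e.g.
#
#   NODE_ROOT_DIR := $(ROOT_DIR)/web
#   NODE_SRC_DIR := src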
30 | 31 | # The location of node application source code, relative to the NODE_ROOT_DIR 32 | NODE_SRC_DIR ?= src 33 | 34 | NODE_ENV ?= production 35 | export NODE_ENV 36 | 37 | YARN := yarn 38 | YARN_MODULE_DIR := $(NODE_ROOT_DIR)/node_modules 39 | YARN_BIN_DIR := $(abspath $(NODE_ROOT_DIR)/node_modules/.bin) 40 | YARN_PACKAGE_FILE := $(NODE_ROOT_DIR)/package.json 41 | YARN_PACKAGE_LOCK_FILE := $(NODE_ROOT_DIR)/yarn.lock 42 | 43 | NODE_SRCS ?= $(abspath $(YARN_PACKAGE_FILE)) $(abspath $(YARN_PACKAGE_LOCK_FILE)) $(shell find $(abspath $(NODE_ROOT_DIR)/$(NODE_SRC_DIR)) -type f | grep -v '__tests__') 44 | 45 | YARN_CACHE_FOLDER ?= $(CACHE_DIR)/yarn 46 | export YARN_CACHE_FOLDER 47 | 48 | YARN_OUTDIR ?= $(OUTPUT_DIR)/yarn 49 | export YARN_OUTDIR 50 | 51 | EXTEND_ESLINT ?= true 52 | export EXTEND_ESLINT 53 | 54 | ESLINT_OUTPUT_DIR := $(OUTPUT_DIR)/lint/eslint 55 | 56 | # ==================================================================================== 57 | # NodeJS Tools Targets 58 | 59 | ESLINT := $(YARN_BIN_DIR)/eslint 60 | $(ESLINT): |yarn.install 61 | build.tools: $(ESLINT) 62 | 63 | # ==================================================================================== 64 | # YARN Targets 65 | 66 | .PHONY: .yarn.init 67 | .yarn.init: 68 | @if ! `$(NODE) --version | grep -q -E '^v($(NODE_SUPPORTED_VERSIONS))\.'`; then \ 69 | $(ERR) unsupported node version. Please install one of the following supported version: '$(NODE_SUPPORTED_VERSIONS)' ;\ 70 | exit 1 ;\ 71 | fi 72 | 73 | # some node packages like node-sass require platform/arch specific install. we need 74 | # to run yarn for each platform. As a result we track a stamp file per host 75 | YARN_INSTALL_STAMP := $(YARN_MODULE_DIR)/.yarn.install.$(HOST_PLATFORM).stamp 76 | 77 | # only run "yarn" if the package.json has changed 78 | $(YARN_INSTALL_STAMP): $(YARN_PACKAGE_FILE) $(YARN_PACKAGE_LOCK_FILE) 79 | @echo ${TIME} $(BLUE)[TOOL]$(CNone) yarn install 80 | @cd $(NODE_ROOT_DIR); $(YARN) --silent --frozen-lockfile --non-interactive --production=false || $(FAIL) 81 | @touch $(YARN_INSTALL_STAMP) 82 | @$(OK) yarn install 83 | 84 | yarn.install: .yarn.init $(YARN_INSTALL_STAMP) 85 | 86 | .yarn.clean: 87 | @rm -rf $(YARN_MODULE_DIR) 88 | 89 | .PHONY: yarn.install .yarn.clean 90 | 91 | # ==================================================================================== 92 | # NodeJS Targets 93 | 94 | $(ESLINT_OUTPUT_DIR)/stylecheck.xml: $(ESLINT) $(NODE_SRCS) 95 | @$(INFO) eslint 96 | @mkdir -p $(ESLINT_OUTPUT_DIR) 97 | @cd $(NODE_ROOT_DIR); $(ESLINT) '$(NODE_SRC_DIR)/**/*.{ts,tsx}' --color 98 | @touch $@ 99 | @$(OK) eslint 100 | 101 | js.lint: $(ESLINT_OUTPUT_DIR)/stylecheck.xml 102 | 103 | js.lint.fix: 104 | @$(INFO) eslint fix 105 | @cd $(NODE_ROOT_DIR); $(ESLINT) '$(NODE_SRC_DIR)/**/*.{ts,tsx}' --color 106 | @$(OK) eslint fix 107 | 108 | # common target for building a node js project 109 | $(eval $(call common.target,js.build)) 110 | 111 | # common target for testing a node js project 112 | $(eval $(call common.target,js.test)) 113 | 114 | .PHONY: js.lint js.lint.fix 115 | 116 | # ==================================================================================== 117 | # Common Targets 118 | 119 | .build.init: .yarn.init .js.build.init 120 | .build.check: js.lint 121 | .build.code: .js.build.run 122 | .build.done: .js.build.done 123 | 124 | .test.init: .js.test.init 125 | .test.run: .js.test.run 126 | .test.done: .js.test.done 127 | 128 | clean: .yarn.clean 129 | 130 | .lint.run: js.lint 131 | .fmt.run: js.lint.fix 132 | 133 | # 
==================================================================================== 134 | # Special Targets 135 | 136 | define NODEJS_HELPTEXT 137 | nodejs Targets: 138 | yarn.install Installs dependencies in a make friendly manner. 139 | 140 | endef 141 | export NODEJS_HELPTEXT 142 | 143 | .PHONY: .js.help 144 | .js.help: 145 | @echo "$$NODEJS_HELPTEXT" 146 | 147 | .help: .js.help 148 | 149 | endif # __NODEJS_MAKEFILE__ 150 | -------------------------------------------------------------------------------- /build/makelib/php.mk: -------------------------------------------------------------------------------- 1 | # Copyright 2020 The Pressinfra Authors. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | ifndef __PHP_MAKEFILE__ 16 | __PHP_MAKEFILE__ := included 17 | 18 | # ==================================================================================== 19 | # Options 20 | 21 | # supported php versions 22 | PHP_SUPPORTED_VERSIONS ?= 8.0|8.1|8.2|8.3 23 | PHP := php 24 | 25 | SELF_DIR := $(dir $(lastword $(MAKEFILE_LIST))) 26 | 27 | # The location of php application within this git repo. 28 | PHP_ROOT_DIR ?= $(SELF_DIR)/../.. 29 | 30 | # The location of php application source code, relative to the PHP_ROOT_DIR 31 | PHP_SRC_DIR ?= src 32 | 33 | COMPOSER_VERSION ?= 2.3.5 34 | COMPOSER_DOWNLOAD_URL ?= https://getcomposer.org/download/$(COMPOSER_VERSION)/composer.phar 35 | $(eval $(call tool.download,composer,$(COMPOSER_VERSION),$(COMPOSER_DOWNLOAD_URL))) 36 | 37 | COMPOSER_INSTALL_ARGS ?= --prefer-dist --classmap-authoritative 38 | COMPOSER_VENDOR_DIR := $(PHP_ROOT_DIR)/vendor 39 | COMPOSER_BIN_DIR := $(abspath $(PHP_ROOT_DIR)/vendor/bin) 40 | COMPOSER_JSON_FILE := $(PHP_ROOT_DIR)/composer.json 41 | COMPOSER_LOCK_FILE := $(PHP_ROOT_DIR)/composer.lock 42 | 43 | COMPOSER_CACHE_DIR := $(CACHE_DIR)/composer 44 | export COMPOSER_CACHE_DIR 45 | 46 | # ==================================================================================== 47 | # PHP Tools Targets 48 | 49 | PHPUNIT := $(COMPOSER_BIN_DIR)/phpunit 50 | $(PHPUNIT): |composer.install 51 | build.tools: $(PHPUNIT) 52 | 53 | PHPCS := $(COMPOSER_BIN_DIR)/phpcs 54 | $(PHPCS): |composer.install 55 | build.tools: $(PHPCS) 56 | 57 | PHPCBF := $(COMPOSER_BIN_DIR)/phpcbf 58 | $(PHPCBF): |composer.install 59 | build.tools: $(PHPCBF) 60 | 61 | # ==================================================================================== 62 | # Composer targets 63 | 64 | .PHONY: .composer.init 65 | .composer.init: 66 | @if ! `$(PHP) --version | grep -q -E '^PHP ($(PHP_SUPPORTED_VERSIONS))\.'`; then \ 67 | $(ERR) unsupported PHP version. 
Please install one of the following supported version: '$(PHP_SUPPORTED_VERSIONS)' ;\ 68 | exit 1 ;\ 69 | fi 70 | $(COMPOSER): .composer.init 71 | 72 | COMPOSER_INSTALL_STAMP := $(COMPOSER_VENDOR_DIR)/.composer.install.stamp 73 | 74 | # only run "composer" if the composer.json has changed 75 | $(COMPOSER_INSTALL_STAMP): $(COMPOSER) $(COMPOSER_JSON_FILE) $(COMPOSER_LOCK_FILE) 76 | @echo ${TIME} $(BLUE)[TOOL]$(CNone) composer install 77 | @cd $(PHP_ROOT_DIR); $(COMPOSER) install --no-interaction || $(FAIL) 78 | @touch $(COMPOSER_INSTALL_STAMP) 79 | @$(OK) composer install 80 | 81 | composer.install: $(COMPOSER_INSTALL_STAMP) 82 | 83 | composer.update: $(COMPOSER) 84 | @echo ${TIME} $(BLUE)[TOOL]$(CNone) composer update 85 | @cd $(PHP_ROOT_DIR); $(COMPOSER) update || $(FAIL) 86 | @touch $(COMPOSER_INSTALL_STAMP) 87 | @$(OK) composer install 88 | 89 | 90 | .composer.clean: 91 | @rm -rf $(COMPOSER_VENDOR_DIR) 92 | 93 | .PHONY: composer.install composer.update .composer.clean 94 | 95 | # ==================================================================================== 96 | # PHP Targets 97 | 98 | php.lint: 99 | @$(INFO) phpcs $(PHP_SRC_DIR) 100 | @cd $(PHP_ROOT_DIR); $(PHPCS) $(PHP_SRC_DIR) 101 | @$(OK) phpcs $(PHP_SRC_DIR) 102 | 103 | php.lint.fix: 104 | @$(INFO) phpcbf $(PHP_SRC_DIR) 105 | @cd $(PHP_ROOT_DIR); $(PHPCBF) $(PHP_SRC_DIR) 106 | @$(OK) phpcbf $(PHP_SRC_DIR) 107 | .PHONY: php.lint php.lint.fix 108 | 109 | # common target for building a php project 110 | $(eval $(call common.target,php.build)) 111 | 112 | # common target for testing a php project 113 | $(eval $(call common.target,php.test)) 114 | 115 | .PHONY: .do.php.test 116 | .php.test.run: .do.php.test 117 | .do.php.test: $(PHPUNIT) 118 | @$(INFO) phpunit 119 | @$(PHPUNIT) $(PHPUNIT_ARGS) 120 | @$(OK) phpunit 121 | 122 | 123 | # ==================================================================================== 124 | # Common Targets 125 | 126 | .build.init: .composer.init 127 | .build.check: php.lint 128 | .build.code: .php.build.run 129 | .build.done: .php.build.done 130 | 131 | .test.init: .php.test.init 132 | .test.run: .php.test.run 133 | .test.done: .php.test.done 134 | 135 | clean: .composer.clean 136 | 137 | .lint.run: php.lint 138 | .fmt.run: php.lint.fix 139 | 140 | # ==================================================================================== 141 | # Special Targets 142 | 143 | define PHP_HELPTEXT 144 | PHP Targets: 145 | composer.install Installs dependencies in a make friendly manner. 146 | composer.update Updates dependencies in a make friendly manner. 147 | 148 | endef 149 | export PHP_HELPTEXT 150 | 151 | .PHONY: .php.help 152 | .php.help: 153 | @echo "$$PHP_HELPTEXT" 154 | 155 | .help: .php.help 156 | 157 | endif # __PHP_MAKEFILE__ 158 | -------------------------------------------------------------------------------- /build/makelib/protobuf.mk: -------------------------------------------------------------------------------- 1 | # Copyright 2016 The Upbound Authors. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | ifndef __PROTOBUF_MAKEFILE__ 16 | __PROTOBUF_MAKEFILE__ := included 17 | 18 | # ==================================================================================== 19 | # Setup protobuf environment 20 | 21 | PROTOBUF_DIR ?= proto 22 | PROTOBUF_FILES ?= $(sort $(shell find $(PROTOBUF_DIR) -name "*.proto")) 23 | 24 | PROTOC_VERSION ?= 3.10.1 25 | 26 | # ==================================================================================== 27 | # Tools install targets 28 | 29 | PROTOTOOL_VERSION ?= 1.9.0 30 | PROTOTOOL_CACHE_PATH := $(TOOLS_HOST_DIR)/prototool 31 | export PROTOTOOL_CACHE_PATH 32 | 33 | PROTOTOOL_DOWNLOAD_URL ?= https://github.com/uber/prototool/releases/download/v$(PROTOTOOL_VERSION)/prototool-$(HOSTOS)-x86_64 34 | $(eval $(call tool.download,prototool,$(PROTOTOOL_VERSION),$(PROTOTOOL_DOWNLOAD_URL))) 35 | 36 | # ==================================================================================== 37 | # Protobuf Targets 38 | 39 | build.tools: .pb.prototool.cache.update 40 | .pb.prototool.cache.update: $(PROTOTOOL_CACHE_PATH)/.update 41 | $(PROTOTOOL_CACHE_PATH)/.update: $(PROTOBUF_DIR)/prototool.yaml |$(PROTOTOOL) 42 | @echo ${TIME} $(BLUE)[TOOL]$(CNone) updating prototool cache 43 | @$(PROTOTOOL) cache update $(PROTOBUF_DIR) 44 | @touch $@ 45 | @$(OK) updating prototool cache 46 | 47 | .pb.init: .pb.prototool.cache.update 48 | 49 | pb.lint: $(PROTOTOOL) 50 | @$(INFO) prototool lint 51 | @$(PROTOTOOL) lint $(PROTOBUF_DIR) || $(FAIL) 52 | @$(OK) prototool lint 53 | 54 | pb.fmt.verify: $(PROTOTOOL) 55 | @$(INFO) prototool format verify 56 | @$(PROTOTOOL) format -l $(PROTOBUF_DIR) || $(FAIL) 57 | @$(OK) prototool format verify 58 | 59 | pb.fmt: $(PROTOTOOL) 60 | @$(INFO) prototool format 61 | @$(PROTOTOOL) format -w $(PROTOBUF_DIR) || $(FAIL) 62 | @$(OK) prototool format 63 | 64 | # expose as common target so that we can hook in other generators 65 | # eg. https://github.com/dcodeIO/protobuf.js 66 | $(eval $(call common.target,pb.generate)) 67 | 68 | .pb.prototool.generate: 69 | @$(INFO) prototool generate 70 | @$(PROTOTOOL) generate $(PROTOBUF_DIR) 71 | @$(OK) prototool generate 72 | 73 | .pb.generate.init: .pb.init 74 | .pb.generate.run: .pb.prototool.generate 75 | 76 | .PHONY: .go.init go.lint go.fmt go.generate .pb.clean .pb.distclean 77 | .PHONY: .pb.prototool.cache.update .pb.prototool.generate 78 | 79 | # ==================================================================================== 80 | # Common Targets 81 | 82 | .lint.init: .pb.init 83 | .lint.run: pb.fmt.verify pb.lint 84 | 85 | .fmt.run: pb.fmt 86 | 87 | .generate.init: .pb.init 88 | .generate.run: pb.generate 89 | 90 | # ==================================================================================== 91 | # Special Targets 92 | 93 | define PROTOBUF_HELPTEXT 94 | Protobuf Targets: 95 | pb.generate Generate code from protobuf files in $(PROTOBUF_DIR) 96 | 97 | endef 98 | export PROTOBUF_HELPTEXT 99 | 100 | .PHONY: .go.help 101 | .pb.help: 102 | @echo "$$PROTOBUF_HELPTEXT" 103 | 104 | .help: .pb.help 105 | 106 | 107 | # # we use a consistent version of gofmt even while running different go compilers. 
108 | # # see https://github.com/golang/go/issues/26397 for more details 109 | # PROTOC_VERSION ?= 3.10.1 110 | # PROTOC_DOWNLOAD_URL ?= https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION)-$(HOSTOS)-$(HOSTARCH).zip 111 | # $(eval $(call tool.download.zip,protoc,$(PROTOC_VERSION),$(PROTOC_DOWNLOAD_URL),bin/protoc)) 112 | 113 | endif # __PROTOBUF_MAKEFILE__ 114 | -------------------------------------------------------------------------------- /build/makelib/react.mk: -------------------------------------------------------------------------------- 1 | # Copyright 2019 The Pressinfra Authors. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | ifndef __REACT_MAKEFILE__ 16 | __REACT_MAKEFILE__ := included 17 | 18 | SELF_DIR := $(dir $(lastword $(MAKEFILE_LIST))) 19 | include $(SELF_DIR)/nodejs.mk 20 | 21 | # ==================================================================================== 22 | # Options 23 | 24 | REACT_OUTPUT_DIR ?= $(OUTPUT_DIR)/react 25 | 26 | REACT_LOCALE_PREFIX ?= messages 27 | 28 | # ==================================================================================== 29 | # React app Targets 30 | 31 | REACT := $(YARN_BIN_DIR)/react-scripts --max_old_space_size=4096 32 | $(REACT): yarn.install 33 | build.tools: $(REACT) 34 | 35 | $(REACT_OUTPUT_DIR)/index.html: $(REACT) $(NODE_SRCS) 36 | @$(INFO) react-scripts build 37 | @cd $(NODE_ROOT_DIR); $(REACT) build 38 | @mkdir -p $(REACT_OUTPUT_DIR) 39 | @rm -rf $(REACT_OUTPUT_DIR) 40 | @mv $(NODE_ROOT_DIR)/build $(REACT_OUTPUT_DIR) 41 | @$(OK) react-scripts build 42 | react.build: $(REACT_OUTPUT_DIR)/index.html 43 | 44 | react.test: $(REACT) 45 | @$(INFO) react-scripts test 46 | @cd $(NODE_ROOT_DIR); TZ='UTC' $(REACT) test --env=jsdom --verbose --colors 47 | @$(OK) react-scripts test 48 | 49 | react.run: $(REACT) 50 | @cd $(NODE_ROOT_DIR); NODE_ENV=development BROWSER=none $(REACT) start 51 | 52 | .react.clean: 53 | @rm -rf $(REACT_OUTPUT_DIR) 54 | 55 | .PHONY: react.build react.test .react.clean 56 | 57 | ifneq ($(LANGUAGES),) 58 | I18NEXT_CONV := $(YARN_BIN_DIR)/i18next-conv 59 | REACT_GETTEXT_PARSER := $(YARN_BIN_DIR)/react-gettext-parser 60 | 61 | $(I18NEXT_CONV): yarn.install 62 | $(REACT_GETTEXT_PARSER): yarn.install 63 | build.tools: $(REACT_GETTEXT_PARSER) $(I18NEXT_CONV) 64 | 65 | .PHONY: react.collect-translations 66 | react.collect-translations: $(REACT_GETTEXT_PARSER) |$(WORK_DIR) 67 | @$(INFO) react-gettext-parser collect translations 68 | @cd $(NODE_ROOT_DIR); $(REACT_GETTEXT_PARSER) --config .gettextparser --no-wrap --output $(abspath $(WORK_DIR))/$(REACT_LOCALE_PREFIX).pot '$(NODE_SRC_DIR)/**/*.{js,ts,tsx}' 69 | # Update the .pot file only if there are changes to actual messages. 
We need this because the collector always updates 70 | # the POT-Creation-Date 71 | # 72 | @$(MAKELIB_BIN_DIR)/po-diff.sh $(LOCALES_DIR)/$(REACT_LOCALE_PREFIX).pot $(WORK_DIR)/$(REACT_LOCALE_PREFIX).pot || \ 73 | mv $(WORK_DIR)/$(REACT_LOCALE_PREFIX).pot $(LOCALES_DIR)/$(REACT_LOCALE_PREFIX).pot 74 | @rm -f $(WORK_DIR)/$(REACT_LOCALE_PREFIX).pot 75 | # 76 | @$(OK) react-gettext-parser collect translations 77 | 78 | react.convert-translations: $(I18NEXT_CONV) 79 | @$(INFO) i18next convert translations to json 80 | $(foreach l,$(LANGUAGES),@$(I18NEXT_CONV) --language $(l) --skipUntranslated \ 81 | --source $(LOCALES_DIR)/$(l)/$(REACT_LOCALE_PREFIX).po \ 82 | --target $(NODE_ROOT_DIR)/$(NODE_SRC_DIR)/locales/$(l).json \ 83 | > /dev/null || $(FAIL) ${\n}\ 84 | ) 85 | @$(OK) i18next convert translations to json 86 | 87 | .translations.init: react.collect-translations 88 | .translations.done: react.convert-translations 89 | endif 90 | 91 | # ==================================================================================== 92 | # Common Targets 93 | 94 | .js.build.run: react.build 95 | .js.test.run: react.test 96 | clean: .react.clean 97 | 98 | # ==================================================================================== 99 | # Special Targets 100 | 101 | define REACT_HELPTEXT 102 | React Targets: 103 | react.run Run the react application for development. 104 | 105 | endef 106 | export REACT_HELPTEXT 107 | 108 | .PHONY: .react.help 109 | .react.help: 110 | @echo "$$REACT_HELPTEXT" 111 | 112 | .help: .react.help 113 | 114 | endif # __REACT_MAKEFILE__ 115 | -------------------------------------------------------------------------------- /build/makelib/utils.mk: -------------------------------------------------------------------------------- 1 | # Copyright 2019 Pressinfra SRL. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | ifndef __UTILS_MAKEFILE__ 16 | __UTILS_MAKEFILE__ := included 17 | 18 | COMMA := , 19 | noop= 20 | SPACE = $(noop) $(noop) 21 | 22 | # define a newline 23 | define \n 24 | 25 | 26 | endef 27 | 28 | lower = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$1)))))))))))))))))))))))))) 29 | upper = $(subst a,A,$(subst b,B,$(subst c,C,$(subst d,D,$(subst e,E,$(subst f,F,$(subst g,G,$(subst h,H,$(subst i,I,$(subst j,J,$(subst k,K,$(subst l,L,$(subst m,M,$(subst n,N,$(subst o,O,$(subst p,P,$(subst q,Q,$(subst r,R,$(subst s,S,$(subst t,T,$(subst u,U,$(subst v,V,$(subst w,W,$(subst x,X,$(subst y,Y,$(subst z,Z,$1)))))))))))))))))))))))))) 30 | list-join = $(subst $(SPACE),$(1),$(strip $(2))) 31 | 32 | # ==================================================================================== 33 | # Tools macros 34 | # 35 | # Theses macros are used to install tools in an idempotent, cache friendly way. 36 | 37 | define tool 38 | $(subst -,_,$(call upper,$(1))) := $$(TOOLS_BIN_DIR)/$(1) 39 | 40 | build.tools: $$(TOOLS_BIN_DIR)/$(1) 41 | $$(TOOLS_BIN_DIR)/$(1): $$(TOOLS_HOST_DIR)/$(1)-v$(2) |$$(TOOLS_BIN_DIR) 42 | @ln -sf $$< $$@ 43 | endef 44 | 45 | # Creates a target for downloading a tool from a given url 46 | # 1 tool, 2 version, 3 download url 47 | define tool.download 48 | $(call tool,$(1),$(2)) 49 | 50 | $$(TOOLS_HOST_DIR)/$(1)-v$(2): |$$(TOOLS_HOST_DIR) 51 | @echo ${TIME} ${BLUE}[TOOL]${CNone} installing $(1) version $(2) from $(3) 52 | @curl -fsSLo $$@ $(3) || $$(FAIL) 53 | @chmod +x $$@ 54 | @$$(OK) installing $(1) version $(2) from $(3) 55 | endef # tool.download 56 | 57 | # Creates a target for downloading and unarchiving a tool from a given url 58 | # 1 tool, 2 version, 3 download url, 4 tool path within archive, 5 tar strip components 59 | define tool.download.tar.gz 60 | $(call tool,$(1),$(2)) 61 | 62 | ifeq ($(4),) 63 | $(1)_TOOL_ARCHIVE_PATH = $(1) 64 | else 65 | $(1)_TOOL_ARCHIVE_PATH = $(4) 66 | endif 67 | 68 | 69 | $$(TOOLS_HOST_DIR)/$(1)-v$(2): |$$(TOOLS_HOST_DIR) 70 | @echo ${TIME} ${BLUE}[TOOL]${CNone} installing $(1) version $(2) from $(3) 71 | @mkdir -p $$(TOOLS_HOST_DIR)/tmp-$(1)-v$(2) || $$(FAIL) 72 | ifeq ($(5),) 73 | @curl -fsSL $(3) | tar -xz --strip-components=1 -C $$(TOOLS_HOST_DIR)/tmp-$(1)-v$(2) || $$(FAIL) 74 | else 75 | @curl -fsSL $(3) | tar -xz --strip-components=$(5) -C $$(TOOLS_HOST_DIR)/tmp-$(1)-v$(2) || $$(FAIL) 76 | endif 77 | @mv $$(TOOLS_HOST_DIR)/tmp-$(1)-v$(2)/$$($(1)_TOOL_ARCHIVE_PATH) $$@ || $(FAIL) 78 | @chmod +x $$@ 79 | @rm -rf $$(TOOLS_HOST_DIR)/tmp-$(1)-v$(2) 80 | @$$(OK) installing $(1) version $(2) from $(3) 81 | endef # tool.download.tar.gz 82 | 83 | 84 | endif # __UTILS_MAKEFILE__ 85 | -------------------------------------------------------------------------------- /build/makelib/wordpress.mk: -------------------------------------------------------------------------------- 1 | # Copyright 2020 The Pressinfra Authors. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | ifndef __WORDPRESS_MAKEFILE__ 16 | __WORDPRESS_MAKEFILE__ := included 17 | 18 | SELF_DIR := $(dir $(lastword $(MAKEFILE_LIST))) 19 | include $(SELF_DIR)/php.mk 20 | 21 | # ==================================================================================== 22 | # Options 23 | 24 | WP_VERSION ?= master 25 | 26 | WP_OUTPUT_DIR := $(OUTPUT_DIR)/wordpress-$(WP_VERSION) 27 | 28 | ifeq ($(WP_VERSION),master) 29 | WP_DOWNLOAD_URL ?= https://github.com/WordPress/wordpress-develop/archive/$(WP_VERSION).tar.gz 30 | else 31 | WP_DOWNLOAD_URL ?= https://wordpress.org/wordpress-$(WP_VERSION).tar.gz 32 | endif 33 | WP_ARCHIVE := $(CACHE_DIR)/wordpress-$(WP_VERSION).tar.gz 34 | 35 | 36 | WP_TESTS_VERSION ?= $(WP_VERSION) 37 | WP_TESTS_DIR ?= $(WORK_DIR)/wordpress-develop-$(WP_VERSION) 38 | WP_TESTS_CONFIG ?= $(abspath $(PHP_ROOT_DIR))/tests/wp-tests-config.php 39 | WP_TESTS_DOWNLOAD_URL ?= https://github.com/WordPress/wordpress-develop/archive/$(WP_VERSION).tar.gz 40 | WP_TESTS_ARCHIVE := $(CACHE_DIR)/wordpress-develop-$(WP_VERSION).tar.gz 41 | 42 | # ===================================================================================== 43 | # WordPress Targets 44 | 45 | $(WP_TESTS_ARCHIVE): 46 | @$(INFO) fetching $(notdir $@) from $(WP_TESTS_DOWNLOAD_URL) 47 | @curl -sLo "$@" "$(WP_TESTS_DOWNLOAD_URL)" || $(FAIL) 48 | @$(OK) fetching $(notdir $@) from $(WP_TESTS_DOWNLOAD_URL) 49 | 50 | $(WP_TESTS_DIR)/src/wp-includes/version.php: $(WP_TESTS_ARCHIVE) 51 | @$(INFO) unpacking $< 52 | @rm -rf $(WP_TESTS_DIR) && mkdir -p $(WP_TESTS_DIR) 53 | @tar -zxf $< -C $(WP_TESTS_DIR) --strip-components 1 54 | @cp tests/wp-tests-config.php $(WP_TESTS_DIR) 55 | @mkdir -p $(WP_TESTS_DIR)/src/wp-content/uploads 56 | @test -f $@ && touch $@ || $(FAIL) 57 | @$(OK) unpacking $< 58 | 59 | $(WP_TESTS_DIR)/wp-tests-config.php: $(WP_TESTS_CONFIG) $(WP_TESTS_DIR)/src/wp-includes/version.php 60 | @cp $(WP_TESTS_CONFIG) $@ 61 | 62 | # add WP_TESTS_DIR env var for running tests 63 | .do.php.test: PHPUNIT:=WP_TESTS_DIR=$(WP_TESTS_DIR) $(PHPUNIT) 64 | 65 | $(WP_ARCHIVE): 66 | @$(INFO) fetching $(notdir $@) from $(WP_DOWNLOAD_URL) 67 | @curl -sLo "$@" "$(WP_DOWNLOAD_URL)" || $(FAIL) 68 | @$(OK) fetching $(notdir $@) from $(WP_DOWNLOAD_URL) 69 | 70 | $(WP_OUTPUT_DIR)/wp-includes/version.php: $(WP_ARCHIVE) 71 | @$(INFO) unpacking $< 72 | @rm -rf $(WP_OUTPUT_DIR) && mkdir -p $(WP_OUTPUT_DIR) 73 | @tar -zxf $< -C $(WP_OUTPUT_DIR) --strip-components 1 74 | @test -f $@ && touch $@ || $(FAIL) 75 | @$(OK) unpacking $< 76 | 77 | $(eval $(call common.target,wordpress.build)) 78 | .wordpress.build.init: $(WP_OUTPUT_DIR)/wp-includes/version.php 79 | 80 | # ==================================================================================== 81 | # Common Targets 82 | 83 | .php.test.init: $(WP_TESTS_DIR)/wp-tests-config.php .php.test.init.composer 84 | 85 | .php.test.init.composer: 86 | @# run composer install to fix: Error: The PHPUnit Polyfills library is a requirement 87 | cd $(WP_TESTS_DIR) && composer install 88 | 89 | .build.artifacts: wordpress.build 90 | 91 | endif # __WORDPRESS_MAKEFILE__ 92 | 
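For orientation, a downstream project is expected to pull these makelib fragments into its own top-level Makefile and then drive builds through the shared targets. The sketch below is illustrative only: the include order, the pinned versions, and the assumption that common.mk supplies OUTPUT_DIR, CACHE_DIR, WORK_DIR and the common.target helper are assumptions, not taken verbatim from this repository.

# Hypothetical consumer Makefile (a minimal sketch; paths and versions are assumptions)
PHP_SUPPORTED_VERSIONS := 8.2|8.3   # narrow the default PHP list if desired
WP_VERSION := 6.5                   # pin WordPress instead of the default 'master'

include build/makelib/common.mk     # assumed to provide OUTPUT_DIR, CACHE_DIR, WORK_DIR, common.target, ...
include build/makelib/wordpress.mk  # also pulls in php.mk and the composer/phpunit tooling

# With the includes in place, the shared entry points become available, e.g.:
#   make build.tools   # download composer and install phpunit/phpcs/phpcbf
#   make lint          # run phpcs via the .lint.run hook
#   make test          # unpack wordpress-develop and run phpunit with WP_TESTS_DIR set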
-------------------------------------------------------------------------------- /doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2023 Pressinfra SRL. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | /* 18 | Package controllerutil provides various utility code for writing kubernetes 19 | controllers using kubebuilder and controller-runtime. We extract here common 20 | code from our controllers code base. 21 | */ 22 | package controllerutil 23 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/presslabs/controller-util 2 | 3 | go 1.22.0 4 | toolchain go1.24.1 5 | 6 | require ( 7 | code.cloudfoundry.org/lager v2.0.0+incompatible 8 | github.com/blendle/zapdriver v1.3.1 9 | github.com/go-logr/logr v1.4.2 10 | github.com/go-logr/zapr v1.3.0 11 | github.com/go-test/deep v1.1.1 12 | github.com/iancoleman/strcase v0.3.0 13 | github.com/imdario/mergo v0.3.16 14 | github.com/onsi/ginkgo/v2 v2.23.4 15 | github.com/onsi/gomega v1.37.0 16 | go.uber.org/zap v1.27.0 17 | golang.org/x/net v0.39.0 18 | k8s.io/api v0.30.6 19 | k8s.io/apimachinery v0.30.6 20 | k8s.io/client-go v0.30.6 21 | sigs.k8s.io/controller-runtime v0.18.5 22 | ) 23 | 24 | require ( 25 | github.com/beorn7/perks v1.0.1 // indirect 26 | github.com/cespare/xxhash/v2 v2.2.0 // indirect 27 | github.com/davecgh/go-spew v1.1.1 // indirect 28 | github.com/emicklei/go-restful/v3 v3.11.0 // indirect 29 | github.com/evanphx/json-patch/v5 v5.9.0 // indirect 30 | github.com/fsnotify/fsnotify v1.7.0 // indirect 31 | github.com/go-openapi/jsonpointer v0.19.6 // indirect 32 | github.com/go-openapi/jsonreference v0.20.2 // indirect 33 | github.com/go-openapi/swag v0.22.3 // indirect 34 | github.com/go-task/slim-sprig/v3 v3.0.0 // indirect 35 | github.com/gogo/protobuf v1.3.2 // indirect 36 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect 37 | github.com/golang/protobuf v1.5.4 // indirect 38 | github.com/google/gnostic-models v0.6.8 // indirect 39 | github.com/google/go-cmp v0.7.0 // indirect 40 | github.com/google/gofuzz v1.2.0 // indirect 41 | github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect 42 | github.com/google/uuid v1.3.0 // indirect 43 | github.com/josharian/intern v1.0.0 // indirect 44 | github.com/json-iterator/go v1.1.12 // indirect 45 | github.com/mailru/easyjson v0.7.7 // indirect 46 | github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect 47 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 48 | github.com/modern-go/reflect2 v1.0.2 // indirect 49 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 50 | github.com/onsi/ginkgo v1.16.5 // indirect 51 | github.com/pkg/errors v0.9.1 // indirect 52 | github.com/prometheus/client_golang v1.16.0 // indirect 53 | github.com/prometheus/client_model 
v0.4.0 // indirect 54 | github.com/prometheus/common v0.44.0 // indirect 55 | github.com/prometheus/procfs v0.12.0 // indirect 56 | github.com/spf13/pflag v1.0.5 // indirect 57 | go.uber.org/automaxprocs v1.6.0 // indirect 58 | go.uber.org/multierr v1.11.0 // indirect 59 | golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect 60 | golang.org/x/oauth2 v0.12.0 // indirect 61 | golang.org/x/sys v0.32.0 // indirect 62 | golang.org/x/term v0.31.0 // indirect 63 | golang.org/x/text v0.24.0 // indirect 64 | golang.org/x/time v0.3.0 // indirect 65 | golang.org/x/tools v0.31.0 // indirect 66 | gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect 67 | google.golang.org/appengine v1.6.7 // indirect 68 | google.golang.org/protobuf v1.36.5 // indirect 69 | gopkg.in/inf.v0 v0.9.1 // indirect 70 | gopkg.in/yaml.v2 v2.4.0 // indirect 71 | gopkg.in/yaml.v3 v3.0.1 // indirect 72 | k8s.io/apiextensions-apiserver v0.30.1 // indirect 73 | k8s.io/klog/v2 v2.120.1 // indirect 74 | k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect 75 | k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect 76 | sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect 77 | sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect 78 | sigs.k8s.io/yaml v1.3.0 // indirect 79 | ) 80 | 81 | replace gopkg.in/fsnotify.v1 => gopkg.in/fsnotify.v1 v1.4.7 82 | -------------------------------------------------------------------------------- /hack/boilerplate.go.txt: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2020 Pressinfra SRL. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | -------------------------------------------------------------------------------- /pkg/beat/beat.go: -------------------------------------------------------------------------------- 1 | package beat 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "sigs.k8s.io/controller-runtime/pkg/event" 8 | ) 9 | 10 | // Beat put a generic event on a channel after a certain time (e.g.: every day). 11 | type Beat struct { 12 | C chan event.GenericEvent 13 | TickerDuration time.Duration 14 | } 15 | 16 | // NewBeat instantiate a beat. 17 | func NewBeat(tickerDuration time.Duration) *Beat { 18 | return &Beat{ 19 | C: make(chan event.GenericEvent), 20 | TickerDuration: tickerDuration, 21 | } 22 | } 23 | 24 | // Start watches for events on a channel after a certain time (e.g.: every day). It's designed to be run by a manager. 
25 | func (b *Beat) Start(ctx context.Context) error { 26 | 	ticker := time.NewTicker(b.TickerDuration) 27 | 28 | 	go func() { 29 | 		b.C <- event.GenericEvent{} 30 | 31 | 		for { 32 | 			select { 33 | 			case <-ctx.Done(): 34 | 				return 35 | 36 | 			case <-ticker.C: 37 | 				b.C <- event.GenericEvent{} 38 | 			} 39 | 		} 40 | 	}() 41 | 42 | 	return nil 43 | } 44 | -------------------------------------------------------------------------------- /pkg/log/adapters/lager/lager.go: -------------------------------------------------------------------------------- 1 | package lager 2 | 3 | import ( 4 | 	"fmt" 5 | 6 | 	"code.cloudfoundry.org/lager" 7 | 	"go.uber.org/zap" 8 | ) 9 | 10 | var _ lager.Logger = &ZapAdapter{} 11 | 12 | // ZapAdapter is an adapter for the lager log interface using a zap logger. 13 | type ZapAdapter struct { 14 | 	sessionID string 15 | 	*zap.Logger 16 | 	origLogger *zap.Logger 17 | 	context    []zap.Field 18 | } 19 | 20 | func dataToFields(data ...lager.Data) []zap.Field { 21 | 	var fields []zap.Field 22 | 23 | 	for _, d := range data { 24 | 		for k, v := range d { 25 | 			fields = append(fields, zap.Any(k, v)) 26 | 		} 27 | 	} 28 | 29 | 	return fields 30 | } 31 | 32 | // NewZapAdapter creates a new ZapAdapter using the passed-in zap.Logger. 33 | func NewZapAdapter(component string, zapLogger *zap.Logger) *ZapAdapter { 34 | 	logger := zapLogger.Named(component) 35 | 36 | 	return &ZapAdapter{ 37 | 		sessionID:  component, 38 | 		Logger:     logger, 39 | 		origLogger: logger, 40 | 	} 41 | } 42 | 43 | // RegisterSink of a ZapAdapter does nothing, as the sink is configured in the 44 | // underlying zap logger. 45 | func (l *ZapAdapter) RegisterSink(_ lager.Sink) {} 46 | 47 | // Session creates a new logger, appending the task to the current session. 48 | func (l *ZapAdapter) Session(task string, data ...lager.Data) lager.Logger { 49 | 	newSession := fmt.Sprintf("%s.%s", l.sessionID, task) 50 | 	logger := &ZapAdapter{ 51 | 		sessionID:  newSession, 52 | 		origLogger: l.origLogger, 53 | 		Logger:     l.origLogger.Named(task), 54 | 		context:    append(l.context, dataToFields(data...)...), 55 | 	} 56 | 57 | 	return logger 58 | } 59 | 60 | // SessionName returns the current logging session name. 61 | func (l *ZapAdapter) SessionName() string { 62 | 	return l.sessionID 63 | } 64 | 65 | // WithData returns a new logger with the specified data fields set. 66 | func (l *ZapAdapter) WithData(data lager.Data) lager.Logger { 67 | 	logger := &ZapAdapter{ 68 | 		sessionID:  l.sessionID, 69 | 		origLogger: l.origLogger, 70 | 		Logger:     l.origLogger.With(dataToFields(data)...), 71 | 		context:    l.context, 72 | 	} 73 | 74 | 	return logger 75 | } 76 | 77 | // Debug logs a debug message. 78 | func (l *ZapAdapter) Debug(action string, data ...lager.Data) { 79 | 	l.Logger.Debug(action, append(l.context, dataToFields(data...)...)...) 80 | } 81 | 82 | // Info logs an informative message. 83 | func (l *ZapAdapter) Info(action string, data ...lager.Data) { 84 | 	l.Logger.Info(action, append(l.context, dataToFields(data...)...)...) 85 | } 86 | 87 | // Error logs an error message. 88 | func (l *ZapAdapter) Error(action string, err error, data ...lager.Data) { 89 | 	fields := append([]zap.Field{zap.Error(err)}, l.context...) 90 | 	fields = append(fields, dataToFields(data...)...) 91 | 	l.Logger.Error(action, fields...) 92 | } 93 | 94 | // Fatal logs a fatal error message. 95 | func (l *ZapAdapter) Fatal(action string, err error, data ...lager.Data) { 96 | 	fields := append([]zap.Field{zap.Error(err)}, l.context...) 97 | 	fields = append(fields, dataToFields(data...)...) 98 | 	l.Logger.Fatal(action, fields...)
99 | } 100 | -------------------------------------------------------------------------------- /pkg/log/log.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019 Pressinfra SRL 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package log 18 | 19 | import ( 20 | "io" 21 | 22 | "github.com/blendle/zapdriver" 23 | "github.com/go-logr/logr" 24 | "go.uber.org/zap" 25 | "go.uber.org/zap/zapcore" 26 | "sigs.k8s.io/controller-runtime/pkg/log" 27 | zaplog "sigs.k8s.io/controller-runtime/pkg/log/zap" 28 | ) 29 | 30 | var ( 31 | // Log is the base logger used by kubebuilder. It delegates 32 | // to another logr.Logger. You *must* call SetLogger to 33 | // get any actual logging. 34 | Log = log.Log 35 | 36 | // KBLog is a base parent logger. 37 | KBLog logr.Logger 38 | 39 | // SetLogger sets a concrete logging implementation for all deferred Loggers. 40 | SetLogger = log.SetLogger 41 | ) 42 | 43 | func init() { //nolint: gochecknoinits 44 | KBLog = Log.WithName("kubebuilder") 45 | } 46 | 47 | // RawStackdriverZapLoggerTo returns a new zap.Logger configured with KubeAwareEncoder and StackDriverEncoder. 48 | func RawStackdriverZapLoggerTo(destWriter io.Writer, development bool, opts ...zap.Option) *zap.Logger { 49 | return zaplog.NewRaw(zaplog.UseDevMode(development), zaplog.WriteTo(destWriter), withStackDriverEncoder(), zaplog.RawZapOpts(opts...)) 50 | } 51 | 52 | func withStackDriverEncoder() zaplog.Opts { 53 | return func(o *zaplog.Options) { 54 | var enc zapcore.Encoder 55 | 56 | if o.Development { 57 | encCfg := zapdriver.NewDevelopmentEncoderConfig() 58 | enc = zapcore.NewConsoleEncoder(encCfg) 59 | } else { 60 | encCfg := zapdriver.NewProductionEncoderConfig() 61 | enc = zapcore.NewJSONEncoder(encCfg) 62 | } 63 | 64 | o.Encoder = enc 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /pkg/log/log_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2020 Pressinfra SRL 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package log 18 | 19 | import ( 20 | "bytes" 21 | "encoding/json" 22 | "errors" 23 | "fmt" 24 | "math/rand" 25 | "testing" 26 | 27 | . "github.com/onsi/ginkgo/v2" 28 | . 
"github.com/onsi/gomega" 29 | 30 | "github.com/go-logr/logr" 31 | "github.com/go-logr/zapr" 32 | "go.uber.org/zap" 33 | 34 | corev1 "k8s.io/api/core/v1" 35 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 36 | ) 37 | 38 | func TestLogger(t *testing.T) { 39 | RegisterFailHandler(Fail) 40 | RunSpecs(t, "Test log configuration Suite") 41 | } 42 | 43 | var _ = Describe("Logging tests", func() { 44 | Context("production stackdrive logger", func() { 45 | var ( 46 | name, ns string 47 | logOutBuffer *bytes.Buffer 48 | zapLogger *zap.Logger 49 | logger logr.Logger 50 | secret *corev1.Secret 51 | ) 52 | 53 | BeforeEach(func() { 54 | r := rand.Int31() //nolint: gosec 55 | name = fmt.Sprintf("test-%d", r) 56 | ns = fmt.Sprintf("default-%d", r) 57 | 58 | secret = &corev1.Secret{ 59 | ObjectMeta: metav1.ObjectMeta{ 60 | Name: name, 61 | Namespace: ns, 62 | }, 63 | } 64 | 65 | var logOut []byte 66 | logOutBuffer = bytes.NewBuffer(logOut) 67 | zapLogger = RawStackdriverZapLoggerTo(logOutBuffer, false) 68 | logger = zapr.NewLogger(zapLogger) 69 | }) 70 | 71 | It("should output a summary for k8s object", func() { 72 | // log new entry and flush it 73 | logger.Info("test log", "key", secret) 74 | Expect(zapLogger.Sync()).To(Succeed()) 75 | 76 | // unmarshal logs and assert on them 77 | var data map[string]interface{} 78 | Expect(json.Unmarshal(logOutBuffer.Bytes(), &data)).To(Succeed()) 79 | 80 | // check that is used the stackdriver logger 81 | Expect(data).To(HaveKey("severity")) 82 | 83 | // assert key field encoded with KubeAwareEncoder 84 | Expect(data).To(HaveKey("key")) 85 | Expect(data["key"]).To(HaveKeyWithValue("name", name)) 86 | Expect(data["key"]).To(HaveKeyWithValue("namespace", ns)) 87 | }) 88 | 89 | It("should output summary even if uses log.WithValues", func() { 90 | // NOTE: objects logged with logger.WithValues are not serialized using KubeAwareEncoder encoder 91 | Skip("bug not fixed") 92 | 93 | // set WithValues a key 94 | logger = logger.WithValues("withValuesKey", secret) 95 | 96 | // log new entry and flush it 97 | logger.Info("test log", "key", secret) 98 | Expect(zapLogger.Sync()).To(Succeed()) 99 | 100 | // unmarshal logs and assert on them 101 | var data map[string]interface{} 102 | Expect(json.Unmarshal(logOutBuffer.Bytes(), &data)).To(Succeed()) 103 | 104 | // assert withValuesKey field 105 | Expect(data).To(HaveKey("withValuesKey")) 106 | Expect(data["withValuesKey"]).To(HaveKeyWithValue("name", name)) 107 | Expect(data["withValuesKey"]).To(HaveKeyWithValue("namespace", ns)) 108 | }) 109 | }) 110 | 111 | Context("development stackdrive logger", func() { 112 | var ( 113 | logOutBuffer *bytes.Buffer 114 | zapLogger *zap.Logger 115 | logger logr.Logger 116 | ) 117 | 118 | BeforeEach(func() { 119 | var logOut []byte 120 | logOutBuffer = bytes.NewBuffer(logOut) 121 | zapLogger = RawStackdriverZapLoggerTo(logOutBuffer, true) 122 | logger = zapr.NewLogger(zapLogger) 123 | }) 124 | 125 | It("should print stacktrace in development mode", func() { 126 | logger.Error(errors.New("test error message"), "logging a stacktrace") //nolint: goerr113 127 | 128 | // assert a piece of stacktrace 129 | Expect(logOutBuffer.String()).To(ContainSubstring("github.com/onsi/ginkgo/v2")) 130 | }) 131 | }) 132 | }) 133 | -------------------------------------------------------------------------------- /pkg/mergo/transformers/transformers.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2018 Pressinfra SRL. 
3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 |     http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Package transformers provides mergo transformers for Kubernetes objects. 18 | package transformers 19 | 20 | import ( 21 | 	"errors" 22 | 	"reflect" 23 | 24 | 	"github.com/imdario/mergo" 25 | 	corev1 "k8s.io/api/core/v1" 26 | 	"k8s.io/apimachinery/pkg/api/resource" 27 | ) 28 | 29 | // TransformerMap is a mergo.Transformers implementation. 30 | type TransformerMap map[reflect.Type]func(dst, src reflect.Value) error //nolint: recvcheck 31 | 32 | // PodSpec holds the mergo transformers for corev1.PodSpec. 33 | var PodSpec TransformerMap 34 | 35 | var ( 36 | 	errCannotMerge     = errors.New("cannot merge when key type differs") 37 | 	errCannotOverwrite = errors.New("cannot overwrite the given values") 38 | ) 39 | 40 | func init() { //nolint: gochecknoinits 41 | 	PodSpec = TransformerMap{ 42 | 		reflect.TypeOf([]corev1.Container{}):            PodSpec.MergeListByKey("Name", mergo.WithOverride), 43 | 		reflect.TypeOf([]corev1.ContainerPort{}):        PodSpec.MergeListByKey("ContainerPort", mergo.WithOverride), 44 | 		reflect.TypeOf([]corev1.EnvVar{}):               PodSpec.MergeListByKey("Name", mergo.WithOverride), 45 | 		reflect.TypeOf(corev1.EnvVar{}):                 PodSpec.OverrideFields("Value", "ValueFrom"), 46 | 		reflect.TypeOf(corev1.VolumeSource{}):           PodSpec.NilOtherFields(), 47 | 		reflect.TypeOf([]corev1.Toleration{}):           PodSpec.MergeListByKey("Key", mergo.WithOverride), 48 | 		reflect.TypeOf([]corev1.Volume{}):               PodSpec.MergeListByKey("Name", mergo.WithOverride), 49 | 		reflect.TypeOf([]corev1.LocalObjectReference{}): PodSpec.MergeListByKey("Name", mergo.WithOverride), 50 | 		reflect.TypeOf([]corev1.HostAlias{}):            PodSpec.MergeListByKey("IP", mergo.WithOverride), 51 | 		reflect.TypeOf([]corev1.VolumeMount{}):          PodSpec.MergeListByKey("MountPath", mergo.WithOverride), 52 | 		reflect.TypeOf(corev1.Affinity{}):               PodSpec.OverrideFields("NodeAffinity", "PodAffinity", "PodAntiAffinity"), 53 | 		reflect.TypeOf(""):                              overwrite, 54 | 		reflect.TypeOf(new(string)):                     overwrite, 55 | 		reflect.TypeOf(new(int32)):                      overwrite, 56 | 		reflect.TypeOf(new(int64)):                      overwrite, 57 | 		reflect.TypeOf(corev1.ResourceList{}):           overwrite, 58 | 		reflect.TypeOf(resource.Quantity{}):             overwrite, 59 | 		reflect.TypeOf(&resource.Quantity{}):            overwrite, 60 | 	} 61 | } 62 | 63 | // overwrite just overwrites the dst value with the source. 64 | func overwrite(dst, src reflect.Value) error { 65 | 	if !src.IsZero() { 66 | 		if dst.CanSet() { 67 | 			dst.Set(src) 68 | 69 | 			return nil 70 | 		} 71 | 72 | 		return errCannotOverwrite 73 | 	} 74 | 75 | 	return nil 76 | } 77 | 78 | // Transformer implements the mergo.Transformers interface for TransformerMap.
79 | func (s TransformerMap) Transformer(t reflect.Type) func(dst, src reflect.Value) error { 80 | if fn, ok := s[t]; ok { 81 | return fn 82 | } 83 | 84 | return nil 85 | } 86 | 87 | func (s *TransformerMap) mergeByKey(key string, dst, elem reflect.Value, opts ...func(*mergo.Config)) error { 88 | elemKey := elem.FieldByName(key) 89 | 90 | for i := range dst.Len() { 91 | dstKey := dst.Index(i).FieldByName(key) 92 | 93 | if elemKey.Kind() != dstKey.Kind() { 94 | return errCannotMerge 95 | } 96 | 97 | eq := eq(key, elem, dst.Index(i)) 98 | if eq { 99 | opts = append(opts, mergo.WithTransformers(s)) 100 | 101 | return mergo.Merge(dst.Index(i).Addr().Interface(), elem.Interface(), opts...) 102 | } 103 | } 104 | 105 | dst.Set(reflect.Append(dst, elem)) 106 | 107 | return nil 108 | } 109 | 110 | func eq(key string, a, b reflect.Value) bool { 111 | aKey := a.FieldByName(key) 112 | bKey := b.FieldByName(key) 113 | 114 | if aKey.Kind() != bKey.Kind() { 115 | return false 116 | } 117 | 118 | eq := false 119 | 120 | //nolint: exhaustive 121 | switch aKey.Kind() { 122 | case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: 123 | eq = aKey.Int() == bKey.Int() 124 | case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: 125 | eq = aKey.Uint() == bKey.Uint() 126 | case reflect.String: 127 | eq = aKey.String() == bKey.String() 128 | case reflect.Float32, reflect.Float64: 129 | eq = aKey.Float() == bKey.Float() 130 | case reflect.Bool: 131 | eq = aKey.Bool() == bKey.Bool() 132 | case reflect.Complex128, reflect.Complex64: 133 | eq = aKey.Complex() == bKey.Complex() 134 | case reflect.Interface: 135 | eq = aKey.Interface() == bKey.Interface() 136 | case reflect.Map: 137 | eq = aKey.MapRange() == bKey.MapRange() 138 | } 139 | 140 | return eq 141 | } 142 | 143 | func indexByKey(key string, v reflect.Value, list reflect.Value) (int, bool) { 144 | for i := range list.Len() { 145 | if eq(key, v, list.Index(i)) { 146 | return i, true 147 | } 148 | } 149 | 150 | return -1, false 151 | } 152 | 153 | // MergeListByKey merges two list by element key (eg. merge []corev1.Container 154 | // by name). If mergo.WithAppendSlice options is passed, the list is extended, 155 | // while elemnts with same name are merged. If not, the list is filtered to 156 | // elements in src. 157 | func (s *TransformerMap) MergeListByKey(key string, opts ...func(*mergo.Config)) func(_, _ reflect.Value) error { 158 | conf := &mergo.Config{} 159 | 160 | for _, opt := range opts { 161 | opt(conf) 162 | } 163 | 164 | return func(dst, src reflect.Value) error { 165 | entries := reflect.MakeSlice(src.Type(), src.Len(), src.Len()) 166 | 167 | for i := range src.Len() { 168 | elem := src.Index(i) 169 | 170 | if err := s.mergeByKey(key, dst, elem, opts...); err != nil { 171 | return err 172 | } 173 | 174 | j, found := indexByKey(key, elem, dst) 175 | if found { 176 | entries.Index(i).Set(dst.Index(j)) 177 | } 178 | } 179 | 180 | if !conf.AppendSlice { 181 | dst.SetLen(entries.Len()) 182 | dst.SetCap(entries.Cap()) 183 | dst.Set(entries) 184 | } 185 | 186 | return nil 187 | } 188 | } 189 | 190 | // NilOtherFields nils all fields not defined in src. 
191 | func (s *TransformerMap) NilOtherFields(opts ...func(*mergo.Config)) func(_, _ reflect.Value) error { 192 | return func(dst, src reflect.Value) error { 193 | for i := range dst.NumField() { 194 | dstField := dst.Type().Field(i) 195 | srcValue := src.FieldByName(dstField.Name) 196 | dstValue := dst.FieldByName(dstField.Name) 197 | 198 | if srcValue.Kind() == reflect.Ptr && srcValue.IsNil() { 199 | dstValue.Set(srcValue) 200 | } else { 201 | if dstValue.Kind() == reflect.Ptr && dstValue.IsNil() { 202 | dstValue.Set(srcValue) 203 | } else { 204 | opts = append(opts, mergo.WithTransformers(s)) 205 | 206 | return mergo.Merge(dstValue.Interface(), srcValue.Interface(), opts...) 207 | } 208 | } 209 | } 210 | 211 | return nil 212 | } 213 | } 214 | 215 | // OverrideFields when merging override fields even if they are zero values (eg. nil or empty list). 216 | func (s *TransformerMap) OverrideFields(fields ...string) func(_, _ reflect.Value) error { 217 | return func(dst, src reflect.Value) error { 218 | for _, field := range fields { 219 | srcValue := src.FieldByName(field) 220 | dst.FieldByName(field).Set(srcValue) 221 | } 222 | 223 | return nil 224 | } 225 | } 226 | -------------------------------------------------------------------------------- /pkg/mergo/transformers/transformers_suite_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2018 Pressinfra SRL. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package transformers_test 18 | 19 | import ( 20 | "testing" 21 | 22 | . "github.com/onsi/ginkgo/v2" 23 | . "github.com/onsi/gomega" 24 | ) 25 | 26 | func TestV1alpha1(t *testing.T) { 27 | RegisterFailHandler(Fail) 28 | RunSpecs(t, "Mergo transformers suite") 29 | } 30 | -------------------------------------------------------------------------------- /pkg/mergo/transformers/transformers_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2018 Pressinfra SRL. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | package transformers_test 17 | 18 | import ( 19 | "fmt" 20 | "math/rand" 21 | 22 | . "github.com/onsi/ginkgo/v2" 23 | . 
"github.com/onsi/gomega" 24 | 25 | "github.com/imdario/mergo" 26 | appsv1 "k8s.io/api/apps/v1" 27 | corev1 "k8s.io/api/core/v1" 28 | "k8s.io/apimachinery/pkg/api/resource" 29 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 30 | 31 | "github.com/presslabs/controller-util/pkg/mergo/transformers" 32 | ) 33 | 34 | var ( 35 | ten32 = int32(10) 36 | five32 = int32(5) 37 | ten64 = int64(10) 38 | five64 = int64(5) 39 | trueVar = true 40 | ) 41 | 42 | var _ = Describe("PodSpec Transformer", func() { 43 | var deployment *appsv1.Deployment 44 | 45 | BeforeEach(func() { 46 | r := rand.Int31() //nolint: gosec 47 | oneGi := resource.MustParse("1Gi") 48 | runtimeClass := "old-runtime-class-name" 49 | sharedPN := false 50 | name := fmt.Sprintf("depl-%d", r) 51 | deployment = &appsv1.Deployment{ 52 | ObjectMeta: metav1.ObjectMeta{ 53 | Name: name, 54 | Namespace: "default", 55 | }, 56 | Spec: appsv1.DeploymentSpec{ 57 | Template: corev1.PodTemplateSpec{ 58 | Spec: corev1.PodSpec{ 59 | Containers: []corev1.Container{ 60 | { 61 | Name: "main", 62 | Image: "main-image", 63 | Env: []corev1.EnvVar{ 64 | { 65 | Name: "TEST", 66 | Value: "me", 67 | }, 68 | }, 69 | }, 70 | { 71 | Name: "helper", 72 | Image: "helper-image", 73 | Ports: []corev1.ContainerPort{ 74 | { 75 | Name: "http", 76 | ContainerPort: 80, 77 | Protocol: corev1.ProtocolTCP, 78 | }, 79 | { 80 | Name: "prometheus", 81 | ContainerPort: 9125, 82 | Protocol: corev1.ProtocolTCP, 83 | }, 84 | }, 85 | Resources: corev1.ResourceRequirements{ 86 | Requests: corev1.ResourceList{ 87 | corev1.ResourceCPU: resource.MustParse("100m"), 88 | }, 89 | Limits: corev1.ResourceList{ 90 | corev1.ResourceCPU: resource.MustParse("300m"), 91 | }, 92 | }, 93 | }, 94 | }, 95 | Volumes: []corev1.Volume{ 96 | { 97 | Name: "code", 98 | VolumeSource: corev1.VolumeSource{ 99 | EmptyDir: &corev1.EmptyDirVolumeSource{ 100 | SizeLimit: &oneGi, 101 | }, 102 | }, 103 | }, 104 | { 105 | Name: "media", 106 | VolumeSource: corev1.VolumeSource{ 107 | EmptyDir: &corev1.EmptyDirVolumeSource{ 108 | SizeLimit: &oneGi, 109 | }, 110 | }, 111 | }, 112 | }, 113 | Affinity: &corev1.Affinity{ 114 | NodeAffinity: &corev1.NodeAffinity{ 115 | RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ 116 | NodeSelectorTerms: []corev1.NodeSelectorTerm{ 117 | { 118 | MatchExpressions: []corev1.NodeSelectorRequirement{ 119 | { 120 | Key: "some-label-key", 121 | Operator: corev1.NodeSelectorOpIn, 122 | Values: []string{"some-label-value"}, 123 | }, 124 | }, 125 | }, 126 | }, 127 | }, 128 | }, 129 | }, 130 | Tolerations: []corev1.Toleration{ 131 | { 132 | Key: "key1", 133 | Operator: corev1.TolerationOpExists, 134 | Effect: corev1.TaintEffectNoSchedule, 135 | }, 136 | }, 137 | PriorityClassName: "old-priority-class", 138 | TerminationGracePeriodSeconds: &ten64, 139 | Priority: &ten32, 140 | RuntimeClassName: &runtimeClass, 141 | HostIPC: false, 142 | ShareProcessNamespace: &sharedPN, 143 | }, 144 | }, 145 | }, 146 | } 147 | }) 148 | 149 | It("removes unused containers", func() { 150 | newSpec := corev1.PodSpec{ 151 | Containers: []corev1.Container{ 152 | { 153 | Name: "helper", 154 | Image: "helper-image", 155 | Ports: []corev1.ContainerPort{ 156 | { 157 | Name: "http", 158 | ContainerPort: 80, 159 | Protocol: corev1.ProtocolTCP, 160 | }, 161 | { 162 | Name: "prometheus", 163 | ContainerPort: 9125, 164 | Protocol: corev1.ProtocolTCP, 165 | }, 166 | }, 167 | }, 168 | }, 169 | } 170 | 171 | Expect(mergo.Merge(&deployment.Spec.Template.Spec, newSpec, 
mergo.WithTransformers(transformers.PodSpec))).To(Succeed()) 172 | Expect(deployment.Spec.Template.Spec.Containers).To(HaveLen(1)) 173 | Expect(deployment.Spec.Template.Spec.Containers[0].Name).To(Equal("helper")) 174 | Expect(deployment.Spec.Template.Spec.Containers[0].Ports).To(HaveLen(2)) 175 | }) 176 | 177 | It("allows container rename", func() { 178 | newSpec := corev1.PodSpec{ 179 | Containers: []corev1.Container{ 180 | { 181 | Name: "new-helper", 182 | Image: "helper-image", 183 | Ports: []corev1.ContainerPort{ 184 | { 185 | Name: "http", 186 | ContainerPort: 80, 187 | Protocol: corev1.ProtocolTCP, 188 | }, 189 | { 190 | Name: "prometheus", 191 | ContainerPort: 9125, 192 | Protocol: corev1.ProtocolTCP, 193 | }, 194 | }, 195 | }, 196 | }, 197 | } 198 | 199 | Expect(mergo.Merge(&deployment.Spec.Template.Spec, newSpec, mergo.WithTransformers(transformers.PodSpec))).To(Succeed()) 200 | Expect(deployment.Spec.Template.Spec.Containers).To(HaveLen(1)) 201 | Expect(deployment.Spec.Template.Spec.Containers[0].Name).To(Equal("new-helper")) 202 | Expect(deployment.Spec.Template.Spec.Containers[0].Ports).To(HaveLen(2)) 203 | }) 204 | 205 | It("allows container image update", func() { 206 | newSpec := deployment.Spec.Template.Spec.DeepCopy() 207 | newSpec.Containers[0].Image = "main-image-v2" 208 | Expect(mergo.Merge(&deployment.Spec.Template.Spec, newSpec, mergo.WithTransformers(transformers.PodSpec))).To(Succeed()) 209 | Expect(deployment.Spec.Template.Spec.Containers[0].Name).To(Equal("main")) 210 | Expect(deployment.Spec.Template.Spec.Containers[0].Image).To(Equal("main-image-v2")) 211 | }) 212 | It("merges env vars", func() { 213 | newSpec := corev1.PodSpec{ 214 | Containers: []corev1.Container{ 215 | { 216 | Name: "main", 217 | Image: "main-image", 218 | Env: []corev1.EnvVar{ 219 | { 220 | Name: "TEST-2", 221 | Value: "me-2", 222 | }, 223 | }, 224 | }, 225 | }, 226 | } 227 | 228 | Expect(mergo.Merge(&deployment.Spec.Template.Spec, newSpec, mergo.WithTransformers(transformers.PodSpec))).To(Succeed()) 229 | Expect(deployment.Spec.Template.Spec.Containers).To(HaveLen(1)) 230 | Expect(deployment.Spec.Template.Spec.Containers[0].Env).To(HaveLen(1)) 231 | Expect(deployment.Spec.Template.Spec.Containers[0].Env[0].Name).To(Equal("TEST-2")) 232 | Expect(deployment.Spec.Template.Spec.Containers[0].Env[0].Value).To(Equal("me-2")) 233 | }) 234 | 235 | It("merges container ports", func() { 236 | newSpec := deployment.Spec.Template.Spec.DeepCopy() 237 | newSpec.Containers[1].Ports = []corev1.ContainerPort{ 238 | { 239 | Name: "prometheus", 240 | ContainerPort: 9125, 241 | Protocol: corev1.ProtocolTCP, 242 | }, 243 | } 244 | Expect(mergo.Merge(&deployment.Spec.Template.Spec, newSpec, mergo.WithTransformers(transformers.PodSpec))).To(Succeed()) 245 | Expect(deployment.Spec.Template.Spec.Containers).To(HaveLen(2)) 246 | Expect(deployment.Spec.Template.Spec.Containers[1].Ports).To(HaveLen(1)) 247 | Expect(deployment.Spec.Template.Spec.Containers[1].Ports[0].ContainerPort).To(Equal(int32(9125))) 248 | }) 249 | 250 | It("allows prepending volume", func() { 251 | newSpec := deployment.Spec.Template.Spec.DeepCopy() 252 | newSpec.Volumes = []corev1.Volume{ 253 | { 254 | Name: "config", 255 | VolumeSource: corev1.VolumeSource{ 256 | EmptyDir: &corev1.EmptyDirVolumeSource{}, 257 | }, 258 | }, 259 | { 260 | Name: "code", 261 | VolumeSource: corev1.VolumeSource{ 262 | HostPath: &corev1.HostPathVolumeSource{}, 263 | }, 264 | }, 265 | { 266 | Name: "media", 267 | VolumeSource: corev1.VolumeSource{ 268 | EmptyDir: 
&corev1.EmptyDirVolumeSource{}, 269 | }, 270 | }, 271 | } 272 | Expect(mergo.Merge(&deployment.Spec.Template.Spec, newSpec, mergo.WithTransformers(transformers.PodSpec))).To(Succeed()) 273 | Expect(deployment.Spec.Template.Spec.Volumes).To(HaveLen(3)) 274 | Expect(deployment.Spec.Template.Spec.Volumes[0].Name).To(Equal(newSpec.Volumes[0].Name)) 275 | Expect(deployment.Spec.Template.Spec.Volumes[1].Name).To(Equal(newSpec.Volumes[1].Name)) 276 | Expect(deployment.Spec.Template.Spec.Volumes[2].Name).To(Equal(newSpec.Volumes[2].Name)) 277 | 278 | Expect(deployment.Spec.Template.Spec.Volumes[1].EmptyDir).To(BeNil()) 279 | Expect(deployment.Spec.Template.Spec.Volumes[1].HostPath).ToNot(BeNil()) 280 | }) 281 | 282 | It("allows replacing volume list", func() { 283 | newSpec := deployment.Spec.Template.Spec.DeepCopy() 284 | newSpec.Volumes = []corev1.Volume{ 285 | { 286 | Name: "config", 287 | VolumeSource: corev1.VolumeSource{ 288 | EmptyDir: &corev1.EmptyDirVolumeSource{}, 289 | }, 290 | }, 291 | } 292 | Expect(mergo.Merge(&deployment.Spec.Template.Spec, newSpec, mergo.WithTransformers(transformers.PodSpec))).To(Succeed()) 293 | Expect(deployment.Spec.Template.Spec.Volumes).To(HaveLen(1)) 294 | Expect(deployment.Spec.Template.Spec.Volumes[0].Name).To(Equal(newSpec.Volumes[0].Name)) 295 | }) 296 | 297 | It("override existing affinity with new one, instead of merging them", func() { 298 | newSpec := deployment.Spec.Template.Spec.DeepCopy() 299 | newAffinity := &corev1.Affinity{ 300 | NodeAffinity: &corev1.NodeAffinity{ 301 | PreferredDuringSchedulingIgnoredDuringExecution: []corev1.PreferredSchedulingTerm{ 302 | { 303 | Weight: 42, 304 | Preference: corev1.NodeSelectorTerm{ 305 | MatchExpressions: []corev1.NodeSelectorRequirement{ 306 | { 307 | Key: "some-label-key", 308 | Operator: corev1.NodeSelectorOpExists, 309 | }, 310 | }, 311 | }, 312 | }, 313 | }, 314 | }, 315 | } 316 | 317 | newSpec.Affinity = newAffinity 318 | Expect(mergo.Merge(&deployment.Spec.Template.Spec, newSpec, mergo.WithTransformers(transformers.PodSpec))).To(Succeed()) 319 | Expect(deployment.Spec.Template.Spec.Affinity).To(Equal(newAffinity)) 320 | }) 321 | 322 | It("override existing tolerations with new one", func() { 323 | newSpec := deployment.Spec.Template.Spec.DeepCopy() 324 | 325 | newTolerations := []corev1.Toleration{ 326 | { 327 | Key: "new-key", 328 | Operator: corev1.TolerationOpEqual, 329 | Effect: corev1.TaintEffectNoExecute, 330 | }, 331 | } 332 | newSpec.Tolerations = newTolerations 333 | 334 | Expect(mergo.Merge(&deployment.Spec.Template.Spec, newSpec, mergo.WithTransformers(transformers.PodSpec))).To(Succeed()) 335 | Expect(deployment.Spec.Template.Spec.Tolerations).To(Equal(newTolerations)) 336 | }) 337 | 338 | It("should update unknown transformer type like Quantity", func() { 339 | oldSpec := deployment.Spec.Template.Spec 340 | newSpec := deployment.Spec.Template.Spec.DeepCopy() 341 | 342 | newCPU := resource.MustParse("3") 343 | newSpec.Containers[1].Resources.Requests[corev1.ResourceCPU] = newCPU 344 | 345 | Expect(mergo.Merge(&oldSpec, newSpec, mergo.WithTransformers(transformers.PodSpec))).To(Succeed()) 346 | Expect(oldSpec.Containers[1].Resources.Requests[corev1.ResourceCPU]).To(Equal(newCPU)) 347 | }) 348 | 349 | It("should remove resources if not provided", func() { 350 | oldSpec := deployment.Spec.Template.Spec 351 | newSpec := deployment.Spec.Template.Spec.DeepCopy() 352 | 353 | // remove limits 354 | newSpec.Containers[1].Resources.Limits = corev1.ResourceList{} 355 | Expect(mergo.Merge(&oldSpec, 
newSpec, mergo.WithTransformers(transformers.PodSpec))).To(Succeed()) 356 | Expect(oldSpec.Containers[1].Resources.Limits).ToNot(HaveKey(corev1.ResourceCPU)) 357 | }) 358 | 359 | It("updates the filds for string, *string, *int32, *int64, bool, *bool, *resource.Quantity", func() { 360 | oldSpec := deployment.Spec.Template.Spec 361 | newSpec := deployment.Spec.Template.Spec.DeepCopy() 362 | 363 | newQuantity := resource.MustParse("123Mi") 364 | 365 | // type string 366 | newSpec.PriorityClassName = "new-priority-class" 367 | // type *int64 368 | newSpec.TerminationGracePeriodSeconds = &five64 369 | // type *int32 370 | newSpec.Priority = &five32 371 | // type *string 372 | rcn := "new-runtime-class" 373 | newSpec.RuntimeClassName = &rcn 374 | // type bool 375 | newSpec.HostIPC = true 376 | // type *bool 377 | newSpec.ShareProcessNamespace = &trueVar 378 | // type *resource.Quantity 379 | newSpec.Volumes[0].VolumeSource.EmptyDir.SizeLimit = &newQuantity 380 | 381 | Expect(mergo.Merge(&oldSpec, newSpec, mergo.WithTransformers(transformers.PodSpec))).To(Succeed()) 382 | 383 | Expect(oldSpec.PriorityClassName).To(Equal(newSpec.PriorityClassName)) 384 | Expect(oldSpec.TerminationGracePeriodSeconds).To(Equal(&five64)) 385 | Expect(oldSpec.Priority).To(Equal(&five32)) 386 | Expect(oldSpec.RuntimeClassName).To(Equal(&rcn)) 387 | Expect(oldSpec.HostIPC).To(Equal(newSpec.HostIPC)) 388 | Expect(oldSpec.ShareProcessNamespace).To(Equal(newSpec.ShareProcessNamespace)) 389 | Expect(oldSpec.Volumes[0].VolumeSource.EmptyDir.SizeLimit.String()).To(Equal(newQuantity.String())) 390 | }) 391 | 392 | It("should not update string with empty value", func() { 393 | oldSpec := deployment.Spec.Template.Spec 394 | newSpec := deployment.Spec.Template.Spec.DeepCopy() 395 | newSpec.PriorityClassName = "" 396 | 397 | Expect(mergo.Merge(&oldSpec, newSpec, mergo.WithTransformers(transformers.PodSpec))).To(Succeed()) 398 | Expect(oldSpec.PriorityClassName).To(Equal("old-priority-class")) 399 | }) 400 | }) 401 | -------------------------------------------------------------------------------- /pkg/meta/finalizer_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019 Pressinfra SRL. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package meta 18 | 19 | import ( 20 | . "github.com/onsi/ginkgo/v2" 21 | . 
"github.com/onsi/gomega" 22 | 23 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 24 | ) 25 | 26 | var _ = Describe("Meta Package Finalizer", func() { 27 | const fin = "myF" 28 | 29 | DescribeTable("at AddFinalizer function call", func(existing, expected []string) { 30 | meta := &metav1.ObjectMeta{ 31 | Finalizers: existing, 32 | } 33 | AddFinalizer(meta, fin) 34 | Expect(meta.Finalizers).To(Equal(expected)) 35 | }, 36 | Entry("add if not present", []string{"f1", "f2"}, []string{"f1", "f2", fin}), 37 | Entry("no add if present", []string{"f1", fin, "f2"}, []string{"f1", fin, "f2"}), 38 | ) 39 | 40 | DescribeTable("at HasFinalizer function call", func(existing []string, expected bool) { 41 | meta := &metav1.ObjectMeta{ 42 | Finalizers: existing, 43 | } 44 | Expect(HasFinalizer(meta, fin)).To(Equal(expected)) 45 | }, 46 | Entry("returns false if not present", []string{"f1", "f2"}, false), 47 | Entry("returns true if present", []string{"f1", fin, "f2"}, true), 48 | ) 49 | 50 | DescribeTable("at RemoveFinalizer function call", func(existing, expected []string) { 51 | meta := &metav1.ObjectMeta{ 52 | Finalizers: existing, 53 | } 54 | RemoveFinalizer(meta, fin) 55 | Expect(meta.Finalizers).To(Equal(expected)) 56 | }, 57 | Entry("no remove if not present", []string{"f1", "f2"}, []string{"f1", "f2"}), 58 | Entry("remove from middle", []string{"f1", fin, "f2"}, []string{"f1", "f2"}), 59 | Entry("remove from begin", []string{fin, "f1", "f2"}, []string{"f1", "f2"}), 60 | Entry("remove from end", []string{"f1", "f2", fin}, []string{"f1", "f2"}), 61 | ) 62 | }) 63 | -------------------------------------------------------------------------------- /pkg/meta/finalizers.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019 Pressinfra SRL. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package meta 18 | 19 | import ( 20 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 21 | ) 22 | 23 | // AddFinalizer add a finalizer in ObjectMeta. 24 | func AddFinalizer(meta *metav1.ObjectMeta, finalizer string) { 25 | if !HasFinalizer(meta, finalizer) { 26 | meta.Finalizers = append(meta.Finalizers, finalizer) 27 | } 28 | } 29 | 30 | // HasFinalizer returns true if ObjectMeta has the finalizer. 31 | func HasFinalizer(meta *metav1.ObjectMeta, finalizer string) bool { 32 | return containsString(meta.Finalizers, finalizer) 33 | } 34 | 35 | // RemoveFinalizer removes the finalizer from ObjectMeta. 36 | func RemoveFinalizer(meta *metav1.ObjectMeta, finalizer string) { 37 | meta.Finalizers = removeString(meta.Finalizers, finalizer) 38 | } 39 | 40 | // containsString is a helper functions to check string from a slice of strings. 41 | func containsString(slice []string, s string) bool { 42 | for _, item := range slice { 43 | if item == s { 44 | return true 45 | } 46 | } 47 | 48 | return false 49 | } 50 | 51 | // removeString is a helper functions to remove string from a slice of strings. 
52 | func removeString(slice []string, s string) []string { 53 | result := []string{} 54 | 55 | for _, item := range slice { 56 | if item == s { 57 | continue 58 | } 59 | 60 | result = append(result, item) 61 | } 62 | 63 | return result 64 | } 65 | -------------------------------------------------------------------------------- /pkg/meta/meta_suite_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019 Pressinfra SRL. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package meta 18 | 19 | import ( 20 | "testing" 21 | 22 | . "github.com/onsi/ginkgo/v2" 23 | . "github.com/onsi/gomega" 24 | ) 25 | 26 | func TestMeta(t *testing.T) { 27 | RegisterFailHandler(Fail) 28 | RunSpecs(t, "Meta Suite") 29 | } 30 | -------------------------------------------------------------------------------- /pkg/net/net.go: -------------------------------------------------------------------------------- 1 | package net 2 | 3 | import ( 4 | "math/rand" 5 | "slices" 6 | ) 7 | 8 | const ( 9 | minPrivatePort = 49152 10 | maxPrivatePort = 65535 11 | ) 12 | 13 | // RandomPortInRangeExcluding will suggest a http port in the given range, excluding the given ports. 14 | func RandomPortInRangeExcluding(startPort, stopPort int, exclude []int) int { 15 | availablePorts := []int{} 16 | 17 | for port := startPort; port <= stopPort; port++ { 18 | if !slices.Contains(exclude, port) { 19 | availablePorts = append(availablePorts, port) 20 | } 21 | } 22 | 23 | // Note: It panics if len(availablePorts) == 0. 24 | return availablePorts[rand.Intn(len(availablePorts))] //nolint: gosec 25 | } 26 | 27 | // RandomPortInRange will suggest a http port in the given range. 28 | func RandomPortInRange(startPort, stopPort int) int { 29 | return RandomPortInRangeExcluding(startPort, stopPort, []int{}) 30 | } 31 | 32 | // RandomPrivatePortExcluding will suggest a http port excluding the given ports. 33 | func RandomPrivatePortExcluding(exclude []int) int { 34 | return RandomPortInRangeExcluding(minPrivatePort, maxPrivatePort, exclude) 35 | } 36 | 37 | // RandomPrivatePort will suggest a private http port. 38 | func RandomPrivatePort() int { 39 | return RandomPrivatePortExcluding([]int{}) 40 | } 41 | -------------------------------------------------------------------------------- /pkg/predicate/class.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2024 Pressinfra SRL. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package predicate 18 | 19 | import ( 20 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 21 | "sigs.k8s.io/controller-runtime/pkg/event" 22 | "sigs.k8s.io/controller-runtime/pkg/predicate" 23 | ) 24 | 25 | // FilterByClassPredicate allows filtering by the class annotation. 26 | type FilterByClassPredicate struct { 27 | class string 28 | annKey string 29 | defaultClass string 30 | } 31 | 32 | var _ predicate.Predicate = &FilterByClassPredicate{} 33 | 34 | // NewFilterByClassPredicate return a new ClassPredicate. 35 | // class param represents the class of predicate. 36 | // annKey represents the class annotation key. 37 | func NewFilterByClassPredicate(class, annKey string) *FilterByClassPredicate { 38 | return &FilterByClassPredicate{ 39 | class: class, 40 | annKey: annKey, 41 | defaultClass: "", 42 | } 43 | } 44 | 45 | // WithDefaultClass represents the default value for the class annotation, used when the annotation value is empty. 46 | func (p *FilterByClassPredicate) WithDefaultClass(defaultClass string) { 47 | p.defaultClass = defaultClass 48 | } 49 | 50 | func (p *FilterByClassPredicate) matchesClass(m metav1.Object) bool { 51 | annotations := m.GetAnnotations() 52 | 53 | class, exists := annotations[p.annKey] 54 | if !exists || class == "" { 55 | class = p.defaultClass 56 | } 57 | 58 | return p.class == class 59 | } 60 | 61 | // Create returns true if the Create event should be processed. 62 | func (p *FilterByClassPredicate) Create(e event.CreateEvent) bool { 63 | return p.matchesClass(e.Object) 64 | } 65 | 66 | // Delete returns true if the Delete event should be processed. 67 | func (p *FilterByClassPredicate) Delete(e event.DeleteEvent) bool { 68 | return p.matchesClass(e.Object) 69 | } 70 | 71 | // Update returns true if the Update event should be processed. 72 | func (p *FilterByClassPredicate) Update(e event.UpdateEvent) bool { 73 | return p.matchesClass(e.ObjectNew) 74 | } 75 | 76 | // Generic returns true if the Generic event should be processed. 77 | func (p *FilterByClassPredicate) Generic(e event.GenericEvent) bool { 78 | return p.matchesClass(e.Object) 79 | } 80 | -------------------------------------------------------------------------------- /pkg/predicate/class_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2024 Pressinfra SRL. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package predicate 18 | 19 | import ( 20 | "fmt" 21 | "math/rand" 22 | 23 | . "github.com/onsi/ginkgo/v2" 24 | . 
"github.com/onsi/gomega" 25 | 26 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 27 | ) 28 | 29 | var _ = Describe("Class Predicate", func() { 30 | var ( 31 | p *FilterByClassPredicate 32 | r int32 33 | class string 34 | annKey string 35 | ) 36 | 37 | BeforeEach(func() { 38 | r = rand.Int31() //nolint: gosec 39 | 40 | class = fmt.Sprintf("class-%d", r) 41 | annKey = fmt.Sprintf("ann.key/%d", r) 42 | 43 | p = NewFilterByClassPredicate(class, annKey) 44 | }) 45 | 46 | When("predicate doesn't have a default", func() { 47 | It("doesn't match class if annotations are empty", func() { 48 | Expect(p.matchesClass(&metav1.ObjectMeta{ 49 | Annotations: map[string]string{}, 50 | })).To(BeFalse()) 51 | }) 52 | 53 | It("doesn't match class if annotations doesn't have the class key", func() { 54 | Expect(p.matchesClass(&metav1.ObjectMeta{ 55 | Annotations: map[string]string{ 56 | "not.class.key": "not.class.value", 57 | }, 58 | })).To(BeFalse()) 59 | }) 60 | }) 61 | 62 | When("predicate class is same with default class", func() { 63 | BeforeEach(func() { 64 | p.WithDefaultClass(p.class) 65 | }) 66 | 67 | It("doesn't match class if annotations are empty", func() { 68 | Expect(p.matchesClass(&metav1.ObjectMeta{ 69 | Annotations: map[string]string{}, 70 | })).To(BeTrue()) 71 | }) 72 | 73 | It("doesn't match class if annotations doesn't have the class key", func() { 74 | Expect(p.matchesClass(&metav1.ObjectMeta{ 75 | Annotations: map[string]string{ 76 | "not.class.key": "not.class.value", 77 | }, 78 | })).To(BeTrue()) 79 | }) 80 | 81 | It("matches class if class annotation is empty, but the default class and predicate class are the same", func() { 82 | Expect(p.matchesClass(&metav1.ObjectMeta{ 83 | Annotations: map[string]string{ 84 | p.annKey: "", 85 | }, 86 | })).To(BeTrue()) 87 | }) 88 | }) 89 | 90 | It("doesn't match class if class annotation is empty, but the default class and predicate class are different", func() { 91 | p.WithDefaultClass("another-class") 92 | Expect(p.matchesClass(&metav1.ObjectMeta{ 93 | Annotations: map[string]string{ 94 | p.annKey: "", 95 | }, 96 | })).To(BeFalse()) 97 | }) 98 | 99 | It("matches class if class annotation and predicate class are same", func() { 100 | Expect(p.matchesClass(&metav1.ObjectMeta{ 101 | Annotations: map[string]string{ 102 | p.annKey: p.class, 103 | }, 104 | })).To(BeTrue()) 105 | }) 106 | }) 107 | -------------------------------------------------------------------------------- /pkg/predicate/suite_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2024 Pressinfra SRL. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package predicate 18 | 19 | import ( 20 | "testing" 21 | 22 | . "github.com/onsi/ginkgo/v2" 23 | . 
"github.com/onsi/gomega" 24 | ) 25 | 26 | func TestMeta(t *testing.T) { 27 | RegisterFailHandler(Fail) 28 | RunSpecs(t, "Predicate Suite") 29 | } 30 | -------------------------------------------------------------------------------- /pkg/rand/rand.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2018 Pressinfra SRL. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Package rand provide functions for securely generating random strings. It 18 | // uses crypto/rand to securely generate random sequences of characters. 19 | // It is adapted from https://gist.github.com/denisbrodbeck/635a644089868a51eccd6ae22b2eb800 20 | // to support multiple character sets. 21 | package rand 22 | 23 | import ( 24 | "crypto/rand" 25 | "fmt" 26 | "io" 27 | "math/big" 28 | ) 29 | 30 | const ( 31 | lowerLetters = "abcdefghijklmnopqrstuvwxyz" 32 | upperLetters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" 33 | letters = lowerLetters + upperLetters 34 | digits = "0123456789" 35 | alphanumerics = letters + digits 36 | ascii = alphanumerics + "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~" 37 | ) 38 | 39 | // NewStringGenerator generate a cryptographically secure random sequence 40 | // generator from given characters. 41 | func NewStringGenerator(characters string) func(int) (string, error) { 42 | return func(length int) (string, error) { 43 | result := "" 44 | 45 | for { 46 | if len(result) >= length { 47 | return result, nil 48 | } 49 | 50 | num, err := rand.Int(rand.Reader, big.NewInt(int64(len(characters)))) 51 | if err != nil { 52 | return "", err 53 | } 54 | 55 | n := num.Int64() 56 | result += string(characters[n]) 57 | } 58 | } 59 | } 60 | 61 | var alphaNumericStringGenerator = NewStringGenerator(alphanumerics) 62 | 63 | // AlphaNumericString returns a cryptographically secure random sequence of 64 | // alphanumeric characters. 65 | func AlphaNumericString(length int) (string, error) { 66 | return alphaNumericStringGenerator(length) 67 | } 68 | 69 | var lowerAlphaNumericStringGenerator = NewStringGenerator(lowerLetters + digits) 70 | 71 | // LowerAlphaNumericString returns a cryptographically secure random sequence of 72 | // lower alphanumeric characters. 73 | func LowerAlphaNumericString(length int) (string, error) { 74 | return lowerAlphaNumericStringGenerator(length) 75 | } 76 | 77 | var asciiStringGenerator = NewStringGenerator(ascii) 78 | 79 | // ASCIIString returns a cryptographically secure random sequence of 80 | // printable ASCII characters, excluding space. 81 | func ASCIIString(length int) (string, error) { 82 | return asciiStringGenerator(length) 83 | } 84 | 85 | func init() { //nolint: gochecknoinits 86 | assertAvailablePRNG() 87 | } 88 | 89 | func assertAvailablePRNG() { 90 | // Assert that a cryptographically secure PRNG is available. 91 | // Panic otherwise. 
92 | buf := make([]byte, 1) 93 | 94 | if _, err := io.ReadFull(rand.Reader, buf); err != nil { 95 | panic(fmt.Sprintf("crypto/rand is unavailable: Read() failed with %#v", err)) 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /pkg/rate-limiter/rate_limiter.go: -------------------------------------------------------------------------------- 1 | package ratelimiter 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "strconv" 7 | "sync" 8 | "time" 9 | 10 | "github.com/go-logr/logr" 11 | "k8s.io/apimachinery/pkg/types" 12 | ctrl "sigs.k8s.io/controller-runtime" 13 | "sigs.k8s.io/controller-runtime/pkg/client" 14 | ) 15 | 16 | // RateLimiter is a rate limiter for controllers. 17 | type RateLimiter struct { 18 | c client.Client 19 | log logr.Logger 20 | mu sync.Mutex 21 | maxItems int 22 | notReadyItems int 23 | items map[types.NamespacedName]time.Time 24 | itemIsReady func(context.Context, client.Client, types.NamespacedName, logr.Logger) bool 25 | durationToBecomeReady time.Duration 26 | logFrequency time.Duration 27 | itemPoolingInterval time.Duration 28 | itemTimeout time.Duration 29 | } 30 | 31 | // NewRateLimiter creates a new RateLimiter. 32 | func NewRateLimiter( 33 | c client.Client, 34 | log logr.Logger, 35 | maxItems int, 36 | itemIsReady func(context.Context, client.Client, types.NamespacedName, logr.Logger) bool, 37 | durationToBecomeReady time.Duration, 38 | logFrequency time.Duration, 39 | itemPoolingInterval time.Duration, 40 | itemTimeout time.Duration, 41 | ) *RateLimiter { 42 | return &RateLimiter{ 43 | c: c, 44 | log: log, 45 | mu: sync.Mutex{}, 46 | maxItems: maxItems, 47 | items: map[types.NamespacedName]time.Time{}, 48 | itemIsReady: itemIsReady, 49 | durationToBecomeReady: durationToBecomeReady, 50 | logFrequency: logFrequency, 51 | itemPoolingInterval: itemPoolingInterval, 52 | itemTimeout: itemTimeout, 53 | } 54 | } 55 | 56 | // SetupWithManager instantiate the RateLimiter, managed by a given manager. 57 | func (r *RateLimiter) SetupWithManager(mgr ctrl.Manager) error { 58 | return mgr.Add(r) 59 | } 60 | 61 | func (r *RateLimiter) checkAndUpdateItems(ctx context.Context) { 62 | r.mu.Lock() 63 | defer r.mu.Unlock() 64 | 65 | r.notReadyItems = 0 66 | 67 | itemsToDelete := []types.NamespacedName{} 68 | 69 | for nsName, inTime := range r.items { 70 | // give the operators/controllers 10 seconds to update item status 71 | if time.Since(inTime) < r.durationToBecomeReady { 72 | continue 73 | } 74 | 75 | if time.Since(inTime) > r.itemTimeout { 76 | itemsToDelete = append(itemsToDelete, nsName) 77 | 78 | r.log.V(0).Info("timeout exceeded", "item", nsName) 79 | 80 | continue 81 | } 82 | 83 | // check item. If it is ready, remove it from buffer 84 | if r.itemIsReady(ctx, r.c, nsName, r.log) { 85 | itemsToDelete = append(itemsToDelete, nsName) 86 | 87 | continue 88 | } 89 | 90 | r.notReadyItems++ 91 | } 92 | 93 | for _, nsName := range itemsToDelete { 94 | delete(r.items, nsName) 95 | } 96 | } 97 | 98 | func (r *RateLimiter) writeLog() { 99 | r.mu.Lock() 100 | defer r.mu.Unlock() 101 | 102 | if len(r.items) > 0 { 103 | r.log.Info("stats", 104 | "noItems", fmt.Sprintf("%d/%d", len(r.items), r.maxItems), 105 | "notReadyItems", strconv.Itoa(r.notReadyItems), 106 | ) 107 | } 108 | } 109 | 110 | // Start will start the RateLimiter. 
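// Start blocks until ctx is cancelled: a goroutine logs buffer statistics
// every logFrequency, while the main loop calls checkAndUpdateItems every
// itemPoolingInterval to drop items that became ready or exceeded itemTimeout.
//
// A rough usage sketch (the manager, logger and isReady callback below are
// illustrative, not part of this package):
//
//	limiter := NewRateLimiter(mgr.GetClient(), logger, 10, isReady,
//		10*time.Second, time.Minute, 5*time.Second, 15*time.Minute)
//	if err := limiter.SetupWithManager(mgr); err != nil {
//		return err
//	}
//	// inside Reconcile:
//	if !limiter.ShouldReconcile(req.NamespacedName) {
//		return ctrl.Result{RequeueAfter: 30 * time.Second}, nil
//	}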
111 | func (r *RateLimiter) Start(ctx context.Context) error { 112 | wg := &sync.WaitGroup{} 113 | wg.Add(1) 114 | 115 | defer wg.Wait() 116 | 117 | go func() { 118 | defer wg.Done() 119 | 120 | for { 121 | select { 122 | case <-time.After(r.logFrequency): 123 | r.writeLog() 124 | case <-ctx.Done(): 125 | return 126 | } 127 | } 128 | }() 129 | 130 | for { 131 | select { 132 | case <-time.After(r.itemPoolingInterval): 133 | r.checkAndUpdateItems(ctx) 134 | case <-ctx.Done(): 135 | return nil 136 | } 137 | } 138 | } 139 | 140 | // ShouldReconcile check the given item. 141 | // if the item is ready for reconciliation, ShouldReconcile removes the item from buffer and returns true. 142 | func (r *RateLimiter) ShouldReconcile(nsName types.NamespacedName) bool { 143 | r.mu.Lock() 144 | defer r.mu.Unlock() 145 | 146 | if len(r.items) < r.maxItems { 147 | r.items[nsName] = time.Now() 148 | 149 | return true 150 | } 151 | 152 | return false 153 | } 154 | -------------------------------------------------------------------------------- /pkg/syncer/example_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2018 Pressinfra SRL. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package syncer_test 18 | 19 | import ( 20 | appsv1 "k8s.io/api/apps/v1" 21 | corev1 "k8s.io/api/core/v1" 22 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 23 | "k8s.io/apimachinery/pkg/types" 24 | "sigs.k8s.io/controller-runtime/pkg/client" 25 | 26 | "github.com/presslabs/controller-util/pkg/syncer" 27 | ) 28 | 29 | func NewDeploymentSyncer(owner client.Object, key types.NamespacedName) syncer.Interface { 30 | deploy := &appsv1.Deployment{ 31 | ObjectMeta: metav1.ObjectMeta{ 32 | Name: key.Name, 33 | Namespace: key.Namespace, 34 | }, 35 | } 36 | 37 | // c is client.Client 38 | return syncer.NewObjectSyncer("ExampleDeployment", owner, deploy, c, func() error { 39 | // Deployment selector is immutable, so we set this value only if 40 | // a new object is going to be created 41 | if deploy.ObjectMeta.CreationTimestamp.IsZero() { 42 | deploy.Spec.Selector = &metav1.LabelSelector{ 43 | MatchLabels: map[string]string{"foo": "bar"}, 44 | } 45 | } 46 | 47 | // update the Deployment pod template 48 | deploy.Spec.Template = corev1.PodTemplateSpec{ 49 | ObjectMeta: metav1.ObjectMeta{ 50 | Labels: map[string]string{ 51 | "foo": "bar", 52 | }, 53 | }, 54 | Spec: corev1.PodSpec{ 55 | Containers: []corev1.Container{ 56 | { 57 | Name: "busybox", 58 | Image: "busybox", 59 | }, 60 | }, 61 | }, 62 | } 63 | 64 | return nil 65 | }) 66 | } 67 | -------------------------------------------------------------------------------- /pkg/syncer/external.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2018 Pressinfra SRL. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package syncer
18 |
19 | import (
20 | "context"
21 | "fmt"
22 |
23 | "k8s.io/apimachinery/pkg/runtime"
24 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
25 | logf "sigs.k8s.io/controller-runtime/pkg/log"
26 | )
27 |
28 | type externalSyncer struct {
29 | name string
30 | obj interface{}
31 | owner runtime.Object
32 | syncFn func(context.Context, interface{}) (controllerutil.OperationResult, error)
33 | }
34 |
35 | func (s *externalSyncer) Object() interface{} {
36 | return s.obj
37 | }
38 |
39 | func (s *externalSyncer) ObjectType() string {
40 | return fmt.Sprintf("%T", s.obj)
41 | }
42 |
43 | func (s *externalSyncer) ObjectOwner() runtime.Object {
44 | return s.owner
45 | }
46 |
47 | func (s *externalSyncer) Sync(ctx context.Context) (SyncResult, error) {
48 | var err error
49 |
50 | log := logf.FromContext(ctx, "syncer", s.name)
51 |
52 | result := SyncResult{}
53 | result.Operation, err = s.syncFn(ctx, s.obj)
54 |
55 | if err != nil {
56 | result.SetEventData(eventWarning, basicEventReason(s.name, err),
57 | fmt.Sprintf("%s failed syncing: %s", s.ObjectType(), err))
58 | log.Error(err, string(result.Operation), "kind", s.ObjectType())
59 | } else {
60 | result.SetEventData(eventNormal, basicEventReason(s.name, err),
61 | fmt.Sprintf("%s successfully %s", s.ObjectType(), result.Operation))
62 | log.V(1).Info(string(result.Operation), "kind", s.ObjectType())
63 | }
64 |
65 | return result, err
66 | }
67 |
68 | // NewExternalSyncer creates a new syncer which syncs a generic object,
69 | // persisting its state into an external store. The name is used for logging
70 | // and event-emitting purposes and should be a valid Go identifier in upper
71 | // camel case (eg. GiteaRepo).
72 | func NewExternalSyncer(
73 | name string, owner runtime.Object, obj interface{}, syncFn func(context.Context, interface{}) (controllerutil.OperationResult, error),
74 | ) Interface {
75 | return &externalSyncer{
76 | name: name,
77 | obj: obj,
78 | owner: owner,
79 | syncFn: syncFn,
80 | }
81 | }
82 |
-------------------------------------------------------------------------------- /pkg/syncer/interface.go: --------------------------------------------------------------------------------
1 | /*
2 | Copyright 2018 Pressinfra SRL.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package syncer
18 |
19 | import (
20 | "context"
21 |
22 | "k8s.io/apimachinery/pkg/runtime"
23 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
24 | )
25 |
26 | // SyncResult is the result of a Sync call.
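// Operation reports what the underlying sync did (created, updated, none and
// so on), while the Event* fields carry the data that the package-level Sync
// helper records as a Kubernetes event on the owner, when one is set.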
27 | type SyncResult struct {
28 | Operation controllerutil.OperationResult
29 | EventType string
30 | EventReason string
31 | EventMessage string
32 | }
33 |
34 | // SetEventData sets event data on a SyncResult.
35 | func (r *SyncResult) SetEventData(eventType, reason, message string) {
36 | r.EventType = eventType
37 | r.EventReason = reason
38 | r.EventMessage = message
39 | }
40 |
41 | // Interface represents a syncer. A syncer persists an object
42 | // (known as the subject) into a store (the Kubernetes API server or a generic store)
43 | // and records Kubernetes events.
44 | type Interface interface {
45 | // Object returns the object for which sync applies.
46 | Object() interface{}
47 |
48 | // ObjectOwner returns the object owner or nil if the object does not have one.
49 | ObjectOwner() runtime.Object
50 |
51 | // Sync persists data into the external store.
52 | Sync(ctx context.Context) (SyncResult, error)
53 | }
54 |
-------------------------------------------------------------------------------- /pkg/syncer/object.go: --------------------------------------------------------------------------------
1 | /*
2 | Copyright 2018 Pressinfra SRL.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package syncer
18 |
19 | import (
20 | "context"
21 | "errors"
22 | "fmt"
23 |
24 | "github.com/go-test/deep"
25 | "k8s.io/apimachinery/pkg/runtime"
26 | "sigs.k8s.io/controller-runtime/pkg/client"
27 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
28 | logf "sigs.k8s.io/controller-runtime/pkg/log"
29 | )
30 |
31 | // ObjectSyncer is a syncer.Interface for syncing kubernetes.Objects only by
32 | // passing a SyncFn.
33 | type ObjectSyncer struct {
34 | Owner client.Object
35 | Obj client.Object
36 | SyncFn controllerutil.MutateFn
37 | Name string
38 | Client client.Client
39 | previousObject runtime.Object
40 | }
41 |
42 | // Object returns the ObjectSyncer subject.
43 | func (s *ObjectSyncer) Object() interface{} {
44 | return s.Obj
45 | }
46 |
47 | // ObjectOwner returns the ObjectSyncer owner.
48 | func (s *ObjectSyncer) ObjectOwner() runtime.Object {
49 | return s.Owner
50 | }
51 |
52 | // Sync does the actual syncing and implements the syncer.Interface Sync method.
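// In outline: it runs controller-runtime's CreateOrUpdate with the mutate
// function below, computes a redacted deep diff of the object before and
// after mutation for logging, and then classifies the outcome: ErrOwnerDeleted
// and ErrIgnore are logged and swallowed, any other error becomes a warning
// event, and a plain success is recorded as a normal event.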
53 | func (s *ObjectSyncer) Sync(ctx context.Context) (SyncResult, error) { 54 | var err error 55 | 56 | result := SyncResult{} 57 | log := logf.FromContext(ctx, "syncer", s.Name) 58 | key := client.ObjectKeyFromObject(s.Obj) 59 | 60 | result.Operation, err = controllerutil.CreateOrUpdate(ctx, s.Client, s.Obj, s.mutateFn()) 61 | 62 | // check deep diff 63 | diff := deep.Equal(redact(s.previousObject), redact(s.Obj)) 64 | 65 | // don't pass to user error for owner deletion, just don't create the object 66 | //nolint: gocritic 67 | if errors.Is(err, ErrOwnerDeleted) { 68 | log.Info(string(result.Operation), "key", key, "kind", objectType(s.Obj, s.Client), "error", err) 69 | err = nil 70 | } else if errors.Is(err, ErrIgnore) { 71 | log.V(1).Info("syncer skipped", "key", key, "kind", objectType(s.Obj, s.Client), "error", err) 72 | err = nil 73 | } else if err != nil { 74 | result.SetEventData(eventWarning, basicEventReason(s.Name, err), 75 | fmt.Sprintf("%s %s failed syncing: %s", objectType(s.Obj, s.Client), key, err)) 76 | log.Error(err, string(result.Operation), "key", key, "kind", objectType(s.Obj, s.Client), "diff", diff) 77 | } else { 78 | result.SetEventData(eventNormal, basicEventReason(s.Name, err), 79 | fmt.Sprintf("%s %s %s successfully", objectType(s.Obj, s.Client), key, result.Operation)) 80 | log.V(1).Info(string(result.Operation), "key", key, "kind", objectType(s.Obj, s.Client), "diff", diff) 81 | } 82 | 83 | return result, err 84 | } 85 | 86 | // Given an ObjectSyncer, returns a controllerutil.MutateFn which also sets the 87 | // owner reference if the subject has one. 88 | func (s *ObjectSyncer) mutateFn() controllerutil.MutateFn { 89 | return func() error { 90 | s.previousObject = s.Obj.DeepCopyObject() 91 | 92 | err := s.SyncFn() 93 | if err != nil { 94 | return err 95 | } 96 | 97 | if s.Owner == nil { 98 | return nil 99 | } 100 | 101 | // set owner reference only if owner resource is not being deleted, otherwise the owner 102 | // reference will be reset in case of deleting with cascade=false. 103 | if s.Owner.GetDeletionTimestamp().IsZero() { 104 | if err := controllerutil.SetControllerReference(s.Owner, s.Obj, s.Client.Scheme()); err != nil { 105 | return err 106 | } 107 | } else if ctime := s.Obj.GetCreationTimestamp(); ctime.IsZero() { 108 | // the owner is deleted, don't recreate the resource if does not exist, because gc 109 | // will not delete it again because has no owner reference set 110 | return ErrOwnerDeleted 111 | } 112 | 113 | return nil 114 | } 115 | } 116 | 117 | // NewObjectSyncer creates a new kubernetes.Object syncer for a given object 118 | // with an owner and persists data using controller-runtime's CreateOrUpdate. 119 | // The name is used for logging and event emitting purposes and should be an 120 | // valid go identifier in upper camel case. (eg. MysqlStatefulSet). 121 | func NewObjectSyncer(name string, owner, obj client.Object, c client.Client, syncFn controllerutil.MutateFn) Interface { 122 | return &ObjectSyncer{ 123 | Owner: owner, 124 | Obj: obj, 125 | SyncFn: syncFn, 126 | Name: name, 127 | Client: c, 128 | } 129 | } 130 | -------------------------------------------------------------------------------- /pkg/syncer/object_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2018 Pressinfra SRL. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 
6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package syncer_test 18 | 19 | import ( 20 | "fmt" 21 | "math/rand" 22 | 23 | . "github.com/onsi/ginkgo/v2" 24 | . "github.com/onsi/gomega" 25 | 26 | "golang.org/x/net/context" 27 | appsv1 "k8s.io/api/apps/v1" 28 | corev1 "k8s.io/api/core/v1" 29 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 30 | "k8s.io/apimachinery/pkg/types" 31 | "k8s.io/client-go/tools/record" 32 | 33 | "github.com/presslabs/controller-util/pkg/syncer" 34 | ) 35 | 36 | var _ = Describe("ObjectSyncer", func() { 37 | var ( 38 | objSyncer *syncer.ObjectSyncer 39 | deployment *appsv1.Deployment 40 | recorder *record.FakeRecorder 41 | owner *corev1.ConfigMap 42 | key types.NamespacedName 43 | ) 44 | 45 | BeforeEach(func() { 46 | r := rand.Int31() //nolint: gosec 47 | 48 | key = types.NamespacedName{ 49 | Name: fmt.Sprintf("example-%d", r), 50 | Namespace: fmt.Sprintf("default-%d", r), 51 | } 52 | 53 | deployment = &appsv1.Deployment{} 54 | recorder = record.NewFakeRecorder(100) 55 | owner = &corev1.ConfigMap{ 56 | ObjectMeta: metav1.ObjectMeta{ 57 | Name: key.Name, 58 | Namespace: key.Namespace, 59 | }, 60 | } 61 | ns := &corev1.Namespace{ 62 | ObjectMeta: metav1.ObjectMeta{ 63 | Name: key.Namespace, 64 | }, 65 | } 66 | Expect(c.Create(context.TODO(), ns)).To(Succeed()) 67 | Expect(c.Create(context.TODO(), owner)).To(Succeed()) 68 | }) 69 | 70 | AfterEach(func() { 71 | c.Delete(context.TODO(), deployment) //nolint: errcheck 72 | c.Delete(context.TODO(), owner) //nolint: errcheck 73 | }) 74 | 75 | When("syncing", func() { 76 | It("successfully creates an ownerless object when owner is nil", func() { 77 | var convOk bool 78 | 79 | objSyncer, convOk = NewDeploymentSyncer(nil, key).(*syncer.ObjectSyncer) 80 | Expect(convOk).To(BeTrue()) 81 | Expect(syncer.Sync(context.TODO(), objSyncer, recorder)).To(Succeed()) 82 | 83 | Expect(c.Get(context.TODO(), key, deployment)).To(Succeed()) 84 | 85 | Expect(deployment.ObjectMeta.OwnerReferences).To(BeEmpty()) 86 | 87 | Expect(deployment.Spec.Template.Spec.Containers).To(HaveLen(1)) 88 | Expect(deployment.Spec.Template.Spec.Containers[0].Name).To(Equal("busybox")) 89 | Expect(deployment.Spec.Template.Spec.Containers[0].Image).To(Equal("busybox")) 90 | 91 | // since this is an ownerless object, no event is emitted 92 | Consistently(recorder.Events).ShouldNot(Receive()) 93 | }) 94 | 95 | It("successfully creates an object and set owner references", func() { 96 | var convOk bool 97 | 98 | objSyncer, convOk = NewDeploymentSyncer(owner, key).(*syncer.ObjectSyncer) 99 | Expect(convOk).To(BeTrue()) 100 | Expect(syncer.Sync(context.TODO(), objSyncer, recorder)).To(Succeed()) 101 | 102 | Expect(c.Get(context.TODO(), key, deployment)).To(Succeed()) 103 | 104 | Expect(deployment.ObjectMeta.OwnerReferences).To(HaveLen(1)) 105 | Expect(deployment.ObjectMeta.OwnerReferences[0].Name).To(Equal(owner.ObjectMeta.Name)) 106 | Expect(*deployment.ObjectMeta.OwnerReferences[0].Controller).To(BeTrue()) 107 | 108 | var event string 109 | Expect(recorder.Events).To(Receive(&event)) 110 | Expect(event).To(ContainSubstring("ExampleDeploymentSyncSuccessfull")) 111 | 
Expect(event).To(ContainSubstring( 112 | fmt.Sprintf("apps/v1, Kind=Deployment %s/%s created successfully", key.Namespace, key.Name), 113 | )) 114 | }) 115 | 116 | It("should ignore ErrIgnore", func() { 117 | obj := &appsv1.Deployment{ 118 | ObjectMeta: metav1.ObjectMeta{ 119 | Name: "example", 120 | Namespace: "default", 121 | }, 122 | } 123 | 124 | syn := syncer.NewObjectSyncer("unknown", nil, obj, c, func() error { 125 | return syncer.ErrIgnore 126 | }) 127 | 128 | Expect(syncer.Sync(context.TODO(), syn, recorder)).To(Succeed()) 129 | }) 130 | 131 | When("owner is deleted", func() { 132 | BeforeEach(func() { 133 | // set deletion timestamp on owner resource 134 | now := metav1.Now() 135 | owner.ObjectMeta.DeletionTimestamp = &now 136 | }) 137 | It("should not create the resource if not exists", func() { 138 | var convOk bool 139 | 140 | objSyncer, convOk = NewDeploymentSyncer(owner, key).(*syncer.ObjectSyncer) 141 | Expect(convOk).To(BeTrue()) 142 | Expect(syncer.Sync(context.TODO(), objSyncer, recorder)).To(Succeed()) 143 | 144 | // check deployment is not created 145 | Expect(c.Get(context.TODO(), key, deployment)).ToNot(Succeed()) 146 | }) 147 | 148 | It("should not set owner reference", func() { 149 | var convOk bool 150 | 151 | // create the deployment 152 | objSyncer, convOk = NewDeploymentSyncer(nil, key).(*syncer.ObjectSyncer) 153 | Expect(convOk).To(BeTrue()) 154 | Expect(syncer.Sync(context.TODO(), objSyncer, recorder)).To(Succeed()) 155 | 156 | // try to set owner reference 157 | objSyncer, convOk = NewDeploymentSyncer(owner, key).(*syncer.ObjectSyncer) 158 | Expect(convOk).To(BeTrue()) 159 | Expect(syncer.Sync(context.TODO(), objSyncer, recorder)).To(Succeed()) 160 | 161 | // check deployment does not have owner reference set 162 | Expect(c.Get(context.TODO(), key, deployment)).To(Succeed()) 163 | Expect(deployment.ObjectMeta.OwnerReferences).To(BeEmpty()) 164 | }) 165 | }) 166 | }) 167 | }) 168 | -------------------------------------------------------------------------------- /pkg/syncer/remove_resource.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2023 Pressinfra SRL. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package syncer 18 | 19 | import ( 20 | "context" 21 | "fmt" 22 | 23 | k8serrors "k8s.io/apimachinery/pkg/api/errors" 24 | "k8s.io/apimachinery/pkg/runtime" 25 | "sigs.k8s.io/controller-runtime/pkg/client" 26 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 27 | logf "sigs.k8s.io/controller-runtime/pkg/log" 28 | ) 29 | 30 | // RemoveResourceSyncer is a syncer.Interface for deleting kubernetes.Objects. 31 | type RemoveResourceSyncer struct { 32 | Owner client.Object 33 | Obj client.Object 34 | Name string 35 | Client client.Client 36 | } 37 | 38 | // Object returns the ObjectSyncer subject. 39 | func (s *RemoveResourceSyncer) Object() interface{} { 40 | return s.Obj 41 | } 42 | 43 | // ObjectOwner returns the ObjectSyncer owner. 
44 | func (s *RemoveResourceSyncer) ObjectOwner() runtime.Object {
45 | return s.Owner
46 | }
47 |
48 | // Sync does the actual syncing and implements the syncer.Interface Sync method.
49 | func (s *RemoveResourceSyncer) Sync(ctx context.Context) (SyncResult, error) {
50 | result := SyncResult{}
51 | log := logf.FromContext(ctx, "syncer", s.Name)
52 | key := client.ObjectKeyFromObject(s.Obj)
53 |
54 | result.Operation = controllerutil.OperationResultNone
55 |
56 | // fetch the resource
57 | if err := s.Client.Get(ctx, key, s.Obj); err != nil {
58 | if k8serrors.IsNotFound(err) {
59 | return result, nil
60 | }
61 |
62 | log.Error(err, string(result.Operation), "key", key, "kind", objectType(s.Obj, s.Client))
63 |
64 | return result, fmt.Errorf("error when fetching resource: %w", err)
65 | }
66 |
67 | // delete the resource
68 | if err := s.Client.Delete(ctx, s.Obj); err != nil {
69 | log.Error(err, string(result.Operation), "key", key, "kind", objectType(s.Obj, s.Client))
70 |
71 | return result, fmt.Errorf("error when deleting resource: %w", err)
72 | }
73 |
74 | result.Operation = controllerutil.OperationResult("deleted")
75 | result.SetEventData(eventNormal, basicEventReason(s.Name, nil), fmt.Sprintf("%s %s successfully deleted", objectType(s.Obj, s.Client), key))
76 |
77 | log.V(1).Info(string(result.Operation), "key", key, "kind", objectType(s.Obj, s.Client))
78 |
79 | return result, nil
80 | }
81 |
82 | // NewRemoveResourceSyncer creates a new kubernetes.Object syncer for a given object
83 | // with an owner, removing the object using controller-runtime's Delete.
84 | // The name is used for logging and event-emitting purposes and should be a
85 | // valid Go identifier in upper camel case (eg. MysqlStatefulSet).
86 | func NewRemoveResourceSyncer(name string, owner, obj client.Object, c client.Client) Interface {
87 | return &RemoveResourceSyncer{
88 | Owner: owner,
89 | Obj: obj,
90 | Name: name,
91 | Client: c,
92 | }
93 | }
94 |
-------------------------------------------------------------------------------- /pkg/syncer/remove_resource_test.go: --------------------------------------------------------------------------------
1 | /*
2 | Copyright 2023 Pressinfra SRL.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package syncer_test
18 |
19 | import (
20 | "fmt"
21 | "math/rand"
22 |
23 | . "github.com/onsi/ginkgo/v2"
24 | .
"github.com/onsi/gomega" 25 | 26 | "golang.org/x/net/context" 27 | appsv1 "k8s.io/api/apps/v1" 28 | corev1 "k8s.io/api/core/v1" 29 | k8serrors "k8s.io/apimachinery/pkg/api/errors" 30 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 31 | "k8s.io/apimachinery/pkg/types" 32 | "k8s.io/client-go/tools/record" 33 | 34 | "github.com/presslabs/controller-util/pkg/syncer" 35 | ) 36 | 37 | var _ = Describe("ObjectSyncer", func() { 38 | var ( 39 | removeResSyncer *syncer.RemoveResourceSyncer 40 | deployment *appsv1.Deployment 41 | recorder *record.FakeRecorder 42 | owner *corev1.ConfigMap 43 | key types.NamespacedName 44 | ) 45 | 46 | BeforeEach(func() { 47 | r := rand.Int31() //nolint: gosec 48 | 49 | key = types.NamespacedName{ 50 | Name: fmt.Sprintf("example-%d", r), 51 | Namespace: fmt.Sprintf("default-%d", r), 52 | } 53 | deplLabels := map[string]string{"test": "test"} 54 | deployment = &appsv1.Deployment{ 55 | ObjectMeta: metav1.ObjectMeta{ 56 | Name: key.Name, 57 | Namespace: key.Namespace, 58 | }, 59 | Spec: appsv1.DeploymentSpec{ 60 | Selector: &metav1.LabelSelector{ 61 | MatchLabels: deplLabels, 62 | }, 63 | Template: corev1.PodTemplateSpec{ 64 | ObjectMeta: metav1.ObjectMeta{ 65 | Labels: deplLabels, 66 | }, 67 | Spec: corev1.PodSpec{ 68 | Containers: []corev1.Container{ 69 | { 70 | Name: "test", 71 | Image: "test", 72 | }, 73 | }, 74 | }, 75 | }, 76 | }, 77 | } 78 | owner = &corev1.ConfigMap{ 79 | ObjectMeta: metav1.ObjectMeta{ 80 | Name: key.Name, 81 | Namespace: key.Namespace, 82 | }, 83 | } 84 | ns := &corev1.Namespace{ 85 | ObjectMeta: metav1.ObjectMeta{ 86 | Name: key.Namespace, 87 | }, 88 | } 89 | recorder = record.NewFakeRecorder(100) 90 | 91 | Expect(c.Create(context.TODO(), ns)).To(Succeed()) 92 | Expect(c.Create(context.TODO(), deployment)).To(Succeed()) 93 | Expect(c.Create(context.TODO(), owner)).To(Succeed()) 94 | }) 95 | 96 | When("syncing", func() { 97 | It("successfully deletes existing resource", func() { 98 | var convOk bool 99 | 100 | removeResSyncer, convOk = syncer.NewRemoveResourceSyncer("test-remove-resource-syncer", owner, deployment, c).(*syncer.RemoveResourceSyncer) 101 | Expect(convOk).To(BeTrue()) 102 | Expect(syncer.Sync(context.TODO(), removeResSyncer, recorder)).To(Succeed()) 103 | 104 | Expect(k8serrors.IsNotFound(c.Get(context.TODO(), key, deployment))).To(BeTrue()) 105 | 106 | Expect(<-recorder.Events).To(Equal( 107 | fmt.Sprintf("Normal TestRemoveResourceSyncerSyncSuccessfull apps/v1, Kind=Deployment %s/%s successfully deleted", key.Namespace, key.Name), 108 | )) 109 | 110 | // no more events 111 | Consistently(recorder.Events).ShouldNot(Receive()) 112 | }) 113 | 114 | It("skip deleting when the resource is lready deleted", func() { 115 | var convOk bool 116 | 117 | Expect(c.Delete(context.TODO(), deployment)).To(Succeed()) 118 | 119 | removeResSyncer, convOk = syncer.NewRemoveResourceSyncer("test-remove-resource-syncer", owner, deployment, c).(*syncer.RemoveResourceSyncer) 120 | Expect(convOk).To(BeTrue()) 121 | Expect(syncer.Sync(context.TODO(), removeResSyncer, recorder)).To(Succeed()) 122 | 123 | Expect(k8serrors.IsNotFound(c.Get(context.TODO(), key, deployment))).To(BeTrue()) 124 | 125 | // since this is an ownerless object, no event is emitted 126 | Consistently(recorder.Events).ShouldNot(Receive()) 127 | }) 128 | }) 129 | }) 130 | -------------------------------------------------------------------------------- /pkg/syncer/syncer.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2018 Pressinfra 
SRL. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package syncer 18 | 19 | import ( 20 | "context" 21 | "errors" 22 | "fmt" 23 | 24 | "github.com/iancoleman/strcase" 25 | corev1 "k8s.io/api/core/v1" 26 | "k8s.io/apimachinery/pkg/runtime" 27 | "k8s.io/client-go/tools/record" 28 | "sigs.k8s.io/controller-runtime/pkg/client" 29 | "sigs.k8s.io/controller-runtime/pkg/client/apiutil" 30 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 31 | ) 32 | 33 | const ( 34 | eventNormal = "Normal" 35 | eventWarning = "Warning" 36 | ) 37 | 38 | var ( 39 | // ErrOwnerDeleted is returned when the object owner is marked for deletion. 40 | ErrOwnerDeleted = errors.New("owner is deleted") 41 | 42 | // ErrIgnore is returned for ignored errors. 43 | // Ignored errors are treated by the syncer as successful syncs. 44 | ErrIgnore = errors.New("ignored error") 45 | ) 46 | 47 | // IgnoredError wraps and marks errors as being ignored. 48 | func IgnoredError(err error) error { 49 | return fmt.Errorf("%w: %w", err, ErrIgnore) 50 | } 51 | 52 | func basicEventReason(objKindName string, err error) string { 53 | if err != nil { 54 | return strcase.ToCamel(objKindName) + "SyncFailed" 55 | } 56 | 57 | return strcase.ToCamel(objKindName) + "SyncSuccessfull" 58 | } 59 | 60 | // Redacts sensitive data from runtime.Object making them suitable for logging. 61 | func redact(obj runtime.Object) runtime.Object { 62 | switch exposed := obj.(type) { 63 | case *corev1.Secret: 64 | redacted := exposed.DeepCopy() 65 | redacted.Data = nil 66 | redacted.StringData = nil 67 | exposed.ObjectMeta.DeepCopyInto(&redacted.ObjectMeta) 68 | 69 | return redacted 70 | case *corev1.ConfigMap: 71 | redacted := exposed.DeepCopy() 72 | redacted.Data = nil 73 | 74 | return redacted 75 | } 76 | 77 | return obj 78 | } 79 | 80 | // objectType returns the type of a runtime.Object. 81 | func objectType(obj runtime.Object, c client.Client) string { 82 | if obj != nil { 83 | gvk, err := apiutil.GVKForObject(obj, c.Scheme()) 84 | if err != nil { 85 | return fmt.Sprintf("%T", obj) 86 | } 87 | 88 | return gvk.String() 89 | } 90 | 91 | return "nil" 92 | } 93 | 94 | // Sync mutates the subject of the syncer interface using controller-runtime 95 | // CreateOrUpdate method, when obj is not nil. It takes care of setting owner 96 | // references and recording kubernetes events where appropriate. 
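// A rough usage sketch from a reconciler (the r.Client, r.Recorder and
// mutateFn names below are illustrative, not part of this package):
//
//	deploySyncer := NewObjectSyncer("ExampleDeployment", owner, deploy, r.Client, mutateFn)
//	if err := Sync(ctx, deploySyncer, r.Recorder); err != nil {
//		return ctrl.Result{}, err
//	}
//
// An event is recorded on the owner only when the sync failed or actually
// changed something; OperationResultNone without an error stays silent.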
97 | func Sync(ctx context.Context, syncer Interface, recorder record.EventRecorder) error {
98 | result, err := syncer.Sync(ctx)
99 | owner := syncer.ObjectOwner()
100 |
101 | if recorder != nil && owner != nil && result.EventType != "" && result.EventReason != "" && result.EventMessage != "" {
102 | if err != nil || result.Operation != controllerutil.OperationResultNone {
103 | recorder.Eventf(owner, result.EventType, result.EventReason, result.EventMessage)
104 | }
105 | }
106 |
107 | return err
108 | }
109 |
110 | // WithoutOwner partially implements the syncer interface for the
111 | // case where the subject has no owner.
112 | type WithoutOwner struct{}
113 |
114 | // GetOwner implements the syncer interface for the case where the subject has no owner.
115 | func (*WithoutOwner) GetOwner() client.Object {
116 | return nil
117 | }
118 |
-------------------------------------------------------------------------------- /pkg/syncer/syncer_suite_test.go: --------------------------------------------------------------------------------
1 | /*
2 | Copyright 2018 Pressinfra SRL.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package syncer_test
18 |
19 | import (
20 | "testing"
21 |
22 | . "github.com/onsi/ginkgo/v2"
23 | . "github.com/onsi/gomega"
24 |
25 | "k8s.io/client-go/kubernetes/scheme"
26 | "k8s.io/client-go/rest"
27 | "sigs.k8s.io/controller-runtime/pkg/client"
28 | "sigs.k8s.io/controller-runtime/pkg/envtest"
29 | )
30 |
31 | var (
32 | t *envtest.Environment
33 | cfg *rest.Config
34 | c client.Client
35 | )
36 |
37 | func TestV1alpha1(t *testing.T) {
38 | RegisterFailHandler(Fail)
39 | RunSpecs(t, "Syncer Suite")
40 | }
41 |
42 | var _ = BeforeSuite(func() {
43 | var err error
44 |
45 | t = &envtest.Environment{}
46 |
47 | cfg, err = t.Start()
48 | Expect(err).NotTo(HaveOccurred())
49 |
50 | c, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
51 | Expect(err).NotTo(HaveOccurred())
52 | })
53 |
54 | var _ = AfterSuite(func() {
55 | Expect(t.Stop()).To(Succeed())
56 | })
57 |
-------------------------------------------------------------------------------- /pkg/syncer/syncer_test.go: --------------------------------------------------------------------------------
1 | /*
2 | Copyright 2019 Pressinfra SRL.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package syncer
18 |
19 | import (
20 | . "github.com/onsi/ginkgo/v2"
21 | .
"github.com/onsi/gomega" 22 | 23 | corev1 "k8s.io/api/core/v1" 24 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 25 | ) 26 | 27 | var _ = Describe("redact function", func() { 28 | It("returns initial object when it doesn't contain secret data", func() { 29 | obj := &corev1.Pod{ 30 | ObjectMeta: metav1.ObjectMeta{ 31 | Name: "awesome-pod", 32 | Namespace: "default", 33 | }, 34 | Spec: corev1.PodSpec{ 35 | Containers: []corev1.Container{ 36 | { 37 | Name: "awesome-container", 38 | }, 39 | }, 40 | }, 41 | } 42 | 43 | Expect(redact(obj)).To(Equal(obj)) 44 | }) 45 | 46 | It("returns the object without secret data when the object is a secret", func() { 47 | obj := &corev1.Secret{ 48 | ObjectMeta: metav1.ObjectMeta{ 49 | Name: "awesome-secret", 50 | Namespace: "default", 51 | }, 52 | Data: map[string][]byte{ 53 | "awesome-secret-key": []byte("awesome-secret-data"), 54 | }, 55 | StringData: map[string]string{ 56 | "another-awesome-secret-key": "another-awesome-secret-data", 57 | }, 58 | } 59 | 60 | expectedObj := &corev1.Secret{ 61 | ObjectMeta: metav1.ObjectMeta{ 62 | Name: "awesome-secret", 63 | Namespace: "default", 64 | }, 65 | } 66 | 67 | Expect(redact(obj)).To(Equal(expectedObj)) 68 | Expect(obj.Data).To(HaveKey("awesome-secret-key")) 69 | }) 70 | }) 71 | --------------------------------------------------------------------------------