├── .coveragerc ├── .dockerignore ├── .github └── workflows │ └── main.yml ├── .gitignore ├── Dockerfile ├── LICENSE ├── README.md ├── _config.yml ├── helmVsK8s-handle.png ├── k8s_handle ├── __init__.py ├── __main__.py ├── config.py ├── dictionary.py ├── exceptions.py ├── filesystem.py ├── k8s │ ├── __init__.py │ ├── adapters.py │ ├── api_clients.py │ ├── api_extensions.py │ ├── diff.py │ ├── fixtures │ │ ├── deployment.yaml │ │ ├── deployment_404.yaml │ │ ├── deployment_no_api.yaml │ │ ├── deployment_wo_replicas.yaml │ │ ├── empty.yaml │ │ ├── invalid_version.yaml │ │ ├── nokind.yaml │ │ ├── nometadata.yaml │ │ ├── nometadataname.yaml │ │ ├── pvc.yaml │ │ ├── pvc2.yaml │ │ ├── pvc3.yaml │ │ ├── service.yaml │ │ ├── service_empty_kind.yaml │ │ ├── service_no_kind.yaml │ │ ├── service_no_ports.yaml │ │ ├── unsupported_version.yaml │ │ ├── valid.yaml │ │ └── valid_version.yaml │ ├── mocks.py │ ├── provisioner.py │ ├── test_adapters.py │ ├── test_api_clients.py │ ├── test_provisioner.py │ ├── test_warning_handler.py │ └── warning_handler.py ├── settings.py ├── templating.py └── transforms.py ├── requirements.txt ├── setup.py ├── tests ├── __init__.py ├── fixtures │ ├── config.yaml │ ├── config_with_env_vars.yaml │ ├── config_with_include_and_env_vars.yaml │ ├── config_without_k8s.yaml │ ├── dashes_config.yaml │ ├── empty_config.yaml │ ├── include.yaml │ ├── include_2levels_with_env.yaml │ ├── include_with_env.yaml │ ├── incorrect_config.yaml │ ├── incorrect_include.yaml │ └── incorrect_include2.yaml ├── templates_tests │ ├── filters.yaml.j2 │ ├── innerdir │ │ └── template1.yaml.j2 │ ├── my_file.txt │ ├── my_file1.txt │ ├── template-dashes.yaml.j2 │ ├── template1.yaml.j2 │ ├── template2.yaml.j2 │ ├── template3.yaml.j2 │ ├── template4.yaml.j2 │ ├── template_include_file.yaml.j2 │ └── template_list_files.yaml.j2 ├── test_config.py ├── test_dictionary.py ├── test_handlers.py └── test_templating.py └── tox.ini /.coveragerc: -------------------------------------------------------------------------------- 1 | [report] 2 | omit = .tox/*,templates/*,templates_tests/*,tests/*,*mocks.py,k8s/test_* 3 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | .dockerignore 2 | Dockerfile* 3 | .coverage 4 | .coveragerc 5 | .git/ 6 | .gitignore 7 | .idea/ 8 | .tox/ 9 | .travis.yml 10 | tox.ini 11 | __pychache__ 12 | htmlcov/ 13 | tests/ 14 | *.png 15 | 16 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | on: [push] 2 | 3 | permissions: 4 | contents: read 5 | 6 | jobs: 7 | tox: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v3 11 | 12 | - name: Set up Python 3.10 13 | uses: actions/setup-python@v4 14 | with: 15 | python-version: "3.10" 16 | 17 | - name: Install dependencies 18 | run: | 19 | python -m pip install --upgrade pip 20 | pip install tox codecov 21 | pip install -r requirements.txt 22 | 23 | - name: Unit tests 24 | run: | 25 | tox -e py 26 | 27 | - name: Upload Coverage to Codecov 28 | uses: codecov/codecov-action@v3 29 | 30 | docker: 31 | runs-on: ubuntu-latest 32 | steps: 33 | - name: Set up QEMU 34 | uses: docker/setup-qemu-action@v2 35 | 36 | - name: Set up Docker Buildx 37 | uses: docker/setup-buildx-action@v2 38 | 39 | - name: Build Docker image 40 | uses: docker/build-push-action@v3 41 | 
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | 49 | # Translations 50 | *.mo 51 | *.pot 52 | 53 | # Django stuff: 54 | *.log 55 | local_settings.py 56 | 57 | # Flask stuff: 58 | instance/ 59 | .webassets-cache 60 | 61 | # Scrapy stuff: 62 | .scrapy 63 | 64 | # Sphinx documentation 65 | docs/_build/ 66 | 67 | # PyBuilder 68 | target/ 69 | 70 | # Jupyter Notebook 71 | .ipynb_checkpoints 72 | 73 | # pyenv 74 | .python-version 75 | 76 | # celery beat schedule file 77 | celerybeat-schedule 78 | 79 | # SageMath parsed files 80 | *.sage.py 81 | 82 | # dotenv 83 | .env 84 | 85 | # virtualenv 86 | .venv 87 | venv/ 88 | ENV/ 89 | 90 | # Spyder project settings 91 | .spyderproject 92 | .spyproject 93 | 94 | # Rope project settings 95 | .ropeproject 96 | 97 | # mkdocs documentation 98 | /site 99 | 100 | # mypy 101 | .mypy_cache/ 102 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.11-alpine 2 | 3 | LABEL description="Image with k8s-handle" \ 4 | maintainer="sre@jetbrains.com" \ 5 | source="https://github.com/jetbrains-infra/k8s-handle" 6 | 7 | ADD . /opt/k8s-handle/ 8 | 9 | RUN apk --no-cache add git ca-certificates bash curl openssl gcc libc-dev libffi-dev openssl-dev make \ 10 | && cd /opt/k8s-handle \ 11 | && python setup.py install \ 12 | && apk del gcc libc-dev libffi-dev openssl-dev 13 | 14 | ENV PATH="/opt/k8s-handle:${PATH}" 15 | 16 | WORKDIR /tmp/ 17 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # k8s-handle 2 | 3 | Easy CI/CD for Kubernetes clusters with Python and Jinja2 4 | 5 | k8s-handle is a command line tool that facilitates continuous delivery for Kubernetes applications. 6 | k8s-handle also supports environments, so you can use the same deployment templates for different environments like `staging` and `production`. 7 | k8s-handle is a Helm alternative, but without a package manager. 8 | 9 | # Table of contents 10 | * [Features](#features) 11 | * [k8s-handle vs helm](#k8s-handle-vs-helm) 12 | * [Before you begin](#before-you-begin) 13 | * [Installation with pip](#installation-with-pip) 14 | * [Usage with docker](#usage-with-docker) 15 | * [Usage with CI/CD tools](#usage-with-cicd-tools) 16 | * [Usage](#usage) 17 | * [Example](#example) 18 | * [Docs](#docs) 19 | * [Configuration structure](#configuration-structure) 20 | * [Environments](#environments) 21 | * [Common section](#common-section) 22 | * [Any other sections](#any-other-sections) 23 | * [Deploy specific environment](#deploy-specific-environment) 24 | * [Templates](#templates) 25 | * [Variables](#variables) 26 | * [Merging with common](#merging-with-common) 27 | * [Load variables from environment](#load-variables-from-environment) 28 | * [Load variables from yaml file](#load-variables-from-yaml-file) 29 | * [How to use in CI/CD](#how-to-use-in-cicd) 30 | * [Gitlab CI](#gitlab-ci) 31 | * [Native integration](#native-integration) 32 | * [Through variables](#through-variables) 33 | * [Working modes](#working-modes) 34 | * [Sync mode](#sync-mode) 35 | * [Strict mode](#strict-mode) 36 | * [Destroy](#destroy) 37 | * [Diff](#diff) 38 | * [Operating without config.yaml](#operating-without-configyaml) 39 | * [Render](#render) 40 | * [Apply](#apply) 41 | * [Delete](#delete) 42 | 43 | # Features 44 | * Easy-to-use command line interface 45 | * Configure any variables in one configuration file (config.yaml) 46 | * Templating for Kubernetes resource files (jinja2) with includes, loops, if-else and so on.
47 | * Loading variables from environment 48 | * Includes for configuration (includes in config.yaml) for big deploys 49 | * Async and sync mode for deploy (wait for deployment, statefulset, daemonset ready) 50 | * Strict mode: stop the deploy if any warning appears 51 | * Easy integration with a CI pipeline (GitLab CI, for example) 52 | * Ability to destroy resources (deploy and destroy from git branches, GitLab environments) 53 | 54 | # k8s-handle vs helm 55 | * k8s-handle acts as a template parser and provisioning tool, but does not include a package manager like Helm does 56 | * k8s-handle doesn't need in-cluster tools like the Tiller server; you only need a ServiceAccount to deploy 57 | * k8s-handle is secure by default: you don't need to generate any certificates to deploy an application; k8s-handle uses the Kubernetes REST API over HTTPS, like kubectl 58 | 59 | ![Deploy process](/helmVsK8s-handle.png) 60 | 61 | # Before you begin 62 | * Set up a Kubernetes cluster [https://kubernetes.io/docs/setup/](https://kubernetes.io/docs/setup/), or use an existing one 63 | * Install `kubectl` if you don't have it [https://kubernetes.io/docs/tasks/tools/install-kubectl/](https://kubernetes.io/docs/tasks/tools/install-kubectl/) 64 | * Create a kubeconfig (~/.kube/config), or skip this if you already have one 65 | ```bash 66 | $ cat > ~/.kube/kubernetes.ca.crt << EOF 67 | > 68 | >EOF 69 | cat > ~/.kube/config << EOF 70 | apiVersion: v1 71 | kind: Config 72 | preferences: {} 73 | clusters: 74 | - cluster: 75 | certificate-authority: kubernetes.ca.crt 76 | server: < protocol://masterurl:port > 77 | name: my-cluster 78 | contexts: 79 | - context: 80 | cluster: my-cluster 81 | namespace: my-namespace 82 | user: my-user 83 | name: my-context 84 | current-context: my-context 85 | users: 86 | - name: my-user 87 | user: 88 | token: 89 | EOF 90 | ``` 91 | # Installation with pip 92 | Requires Python 3.4 or higher 93 | ``` 94 | $ pip install k8s-handle 95 | -- or -- 96 | $ pip install --user k8s-handle 97 | ``` 98 | 99 | 100 | # Usage with docker 101 | ```bash 102 | $ cd $WORKDIR 103 | $ git clone https://github.com/2gis/k8s-handle-example.git 104 | $ cd k8s-handle-example 105 | $ docker run --rm -v $(pwd):/tmp/ -v "$HOME/.kube:/root/.kube" 2gis/k8s-handle k8s-handle deploy -s staging --use-kubeconfig 106 | INFO:templating:File "/tmp/k8s-handle/configmap.yaml" successfully generated 107 | INFO:templating:Trying to generate file from template "secret.yaml.j2" in "/tmp/k8s-handle" 108 | INFO:templating:File "/tmp/k8s-handle/secret.yaml" successfully generated 109 | INFO:templating:Trying to generate file from template "deployment.yaml.j2" in "/tmp/k8s-handle" 110 | INFO:templating:File "/tmp/k8s-handle/deployment.yaml" successfully generated 111 | INFO:k8s.resource:ConfigMap "k8s-starter-kit-nginx-conf" already exists, replace it 112 | INFO:k8s.resource:Secret "k8s-starter-kit-secret" already exists, replace it 113 | INFO:k8s.resource:Deployment "k8s-starter-kit" does not exist, create it 114 | 115 | _(_)_ wWWWw _ 116 | @@@@ (_)@(_) vVVVv _ @@@@ (___) _(_)_ 117 | @@()@@ wWWWw (_)\ (___) _(_)_ @@()@@ Y (_)@(_) 118 | @@@@ (___) `|/ Y (_)@(_) @@@@ \|/ (_) 119 | / Y \| \|/ /(_) \| |/ | 120 | \ | \ |/ | / \ | / \|/ |/ \| \|/ 121 | \|// \|/// \|// \|/// \|/// \|// |// \|// 122 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 123 | ``` 124 | 125 | # Usage with CI/CD tools 126 | If you are using GitLab CI, TeamCity or something else, you can use a Docker runner/agent; the script will be slightly different: 127 | ```bash 128 | $ k8s-handle
deploy -s staging 129 | ``` 130 | Configure checkout of https://github.com/2gis/k8s-handle-example.git and the specific branch `without-kubeconfig`. 131 | You also need to set up the following env vars: 132 | * K8S_NAMESPACE 133 | * K8S_MASTER_URI 134 | * K8S_CA_BASE64 (optional) 135 | * K8S_TOKEN 136 | 137 | Use the 2gis/k8s-handle image. 138 | 139 | Notice: If you use GitLab CI, you can configure the [Kubernetes integration](https://docs.gitlab.com/ee/user/project/clusters/#adding-an-existing-kubernetes-cluster) and just use the `--use-kubeconfig` flag. 140 | 141 | # Usage 142 | ```bash 143 | $ k8s-handle deploy -s staging --use-kubeconfig 144 | INFO:templating:Trying to generate file from template "configmap.yaml.j2" in "/tmp/k8s-handle" 145 | INFO:templating:File "/tmp/k8s-handle/configmap.yaml" successfully generated 146 | INFO:templating:Trying to generate file from template "secret.yaml.j2" in "/tmp/k8s-handle" 147 | INFO:templating:File "/tmp/k8s-handle/secret.yaml" successfully generated 148 | INFO:templating:Trying to generate file from template "deployment.yaml.j2" in "/tmp/k8s-handle" 149 | INFO:templating:File "/tmp/k8s-handle/deployment.yaml" successfully generated 150 | INFO:k8s.resource:ConfigMap "k8s-starter-kit-nginx-conf" already exists, replace it 151 | INFO:k8s.resource:Secret "k8s-starter-kit-secret" already exists, replace it 152 | INFO:k8s.resource:Deployment "k8s-starter-kit" does not exist, create it 153 | 154 | _(_)_ wWWWw _ 155 | @@@@ (_)@(_) vVVVv _ @@@@ (___) _(_)_ 156 | @@()@@ wWWWw (_)\ (___) _(_)_ @@()@@ Y (_)@(_) 157 | @@@@ (___) `|/ Y (_)@(_) @@@@ \|/ (_) 158 | / Y \| \|/ /(_) \| |/ | 159 | \ | \ |/ | / \ | / \|/ |/ \| \|/ 160 | \|// \|/// \|// \|/// \|/// \|// |// \|// 161 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 162 | $ kubectl get configmap 163 | NAME DATA AGE 164 | k8s-starter-kit-nginx-conf 1 1m 165 | $ kubectl get secret | grep starter-kit 166 | k8s-starter-kit-secret Opaque 1 1m 167 | $ kubectl get deploy 168 | NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE 169 | k8s-starter-kit 1 1 1 1 3m 170 | ``` 171 | Now set `replicas_count` in config.yaml to 3, and run again in sync mode: 172 | ```bash 173 | $ k8s-handle deploy -s staging --use-kubeconfig --sync-mode 174 | ... 175 | INFO:k8s.resource:Deployment "k8s-starter-kit" already exists, replace it 176 | INFO:k8s.resource:desiredReplicas = 3, updatedReplicas = 3, availableReplicas = 1 177 | INFO:k8s.resource:Deployment not completed on 1 attempt, next attempt in 5 sec. 178 | INFO:k8s.resource:desiredReplicas = 3, updatedReplicas = 3, availableReplicas = 2 179 | INFO:k8s.resource:Deployment not completed on 2 attempt, next attempt in 5 sec. 180 | INFO:k8s.resource:desiredReplicas = 3, updatedReplicas = 3, availableReplicas = 3 181 | INFO:k8s.resource:Deployment completed on 3 attempt 182 | ... 183 | $ kubectl get deploy 184 | NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE 185 | k8s-starter-kit 3 3 3 3 7m 186 | ``` 187 | 188 | # Example 189 | You can start with the example at https://github.com/2gis/k8s-handle-example. It contains nginx with an index.html and all the Kubernetes resources needed to deploy it.
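A config.yaml for a project like this one typically declares the per-section variables and the list of templates to render. The sketch below is only an illustration (the `app_name` and `replicas` variable names are assumptions, not taken from the example repository); it reuses the namespace and template file names that appear in the run output below:

```yaml
# Illustrative sketch only - see the example repository for the actual config.yaml
common:
  app_name: example              # assumed variable name
  k8s_namespace: k8s-handle-test
staging:
  replicas: 1                    # assumed variable name
  templates:
    - template: configmap.yaml.j2
    - template: deployment.yaml.j2
    - template: service.yaml.j2
```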
190 | ```bash 191 | $ cd $WORKDIR 192 | $ git clone https://github.com/2gis/k8s-handle-example.git 193 | $ cd k8s-handle-example 194 | $ k8s-handle deploy -s staging --use-kubeconfig --sync-mode 195 | INFO:__main__:Using default namespace k8s-handle-test 196 | INFO:templating:Trying to generate file from template "configmap.yaml.j2" in "/tmp/k8s-handle" 197 | INFO:templating:File "/tmp/k8s-handle/configmap.yaml" successfully generated 198 | INFO:templating:Trying to generate file from template "deployment.yaml.j2" in "/tmp/k8s-handle" 199 | INFO:templating:File "/tmp/k8s-handle/deployment.yaml" successfully generated 200 | INFO:templating:Trying to generate file from template "service.yaml.j2" in "/tmp/k8s-handle" 201 | INFO:templating:File "/tmp/k8s-handle/service.yaml" successfully generated 202 | INFO:k8s.resource:ConfigMap "example-nginx-conf" does not exist, create it 203 | INFO:k8s.resource:Deployment "example" does not exist, create it 204 | INFO:k8s.resource:desiredReplicas = 1, updatedReplicas = 1, availableReplicas = None 205 | INFO:k8s.resource:Deployment not completed on 1 attempt, next attempt in 5 sec. 206 | INFO:k8s.resource:desiredReplicas = 1, updatedReplicas = 1, availableReplicas = None 207 | INFO:k8s.resource:Deployment not completed on 2 attempt, next attempt in 5 sec. 208 | INFO:k8s.resource:desiredReplicas = 1, updatedReplicas = 1, availableReplicas = 1 209 | INFO:k8s.resource:Deployment completed on 3 attempt 210 | INFO:k8s.resource:Service "example" does not exist, create it 211 | $ kubectl -n k8s-handle-test get svc 212 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 213 | example NodePort 10.100.132.168 80:31153/TCP 52s 214 | $ curl http://:31153 215 |

Hello world!

216 | Deployed with k8s-handle. 217 | ``` 218 | 219 | # Docs 220 | ## Configuration structure 221 | k8s-handle works with 2 components: 222 | * config.yaml (or any other yaml file passed through the -c argument) that stores all configuration for the deploy 223 | * a templates catalog, where you can store all required templates for Kubernetes resource files (can be changed through the 224 | TEMPLATES_DIR env var) 225 | 226 | ## Environments 227 | If you have testing, staging, production-zone-1, production-zone-2, etc., you can easily cover all environments with 228 | one set of templates for your application without duplication. 229 | ### Common section 230 | In the common section you can specify variables that you want to combine with the variables of the selected section: 231 | ```yaml 232 | common: 233 | app_name: my-shiny-app 234 | app_port: 8080 235 | ``` 236 | Both of these example variables will be added to the variables of the selected section. 237 | The common section is optional and can be omitted. 238 | ### Any other sections 239 | Let's specify a testing environment: 240 | ```yaml 241 | testing: 242 | replicas: 1 243 | request_cpu: 100m 244 | request_memory: 128M 245 | some_option: disabled 246 | ``` 247 | In testing we usually don't need much performance from our application, so we can keep 1 replica and a small 248 | amount of resources for it. You can also set some options to a disabled state, in case you don't want to affect 249 | any integrated systems during testing. 250 | ```yaml 251 | staging: 252 | replicas: 2 253 | request_cpu: 200m 254 | request_memory: 512M 255 | ``` 256 | Some teams use staging for integration and demo, so we can increase replicas and resources for our service. 257 | ```yaml 258 | production-zone-1: 259 | replicas: 50 260 | request_cpu: 1000m 261 | request_memory: 1G 262 | production: "true" 263 | never_give_up: "true" 264 | ``` 265 | In production we need to process n thousand RPS, so we set replicas to 50, increase resources and set all production 266 | variables to ready-for-anything values. 267 | ### Deploy specific environment 268 | In your CI/CD script you can deploy any environment: 269 | ```bash 270 | $ k8s-handle deploy -s staging # Or testing or production-zone-1 271 | ``` 272 | In GitLab CI, for example, you can create a manual job for each environment. 273 | 274 | ## Templates 275 | Templates in k8s-handle use jinja2 syntax and support all standard filters plus some special ones: 276 | ### Filters 277 | * `{{ my_var | b64encode }}` - encode the value of my_var to base64 278 | * `{{ my_var | b64decode }}` - decode the value of my_var from base64 279 | * `{{ my_var | hash_sha256 }}` - compute the sha256 hash of the value of my_var 280 | * `{{ my_var | to_yaml(flow_style=True, width=99999) }}` - tries to render a yaml representation of the given variable (flow_style=True - render in one line, False - multiline; width - max line width for rendered yaml lines) 281 | > Warning: You can use filters only in templates, not in config.yaml 282 | ### Functions 283 | * `{{ include_file('my_file.txt') }}` - include my_file.txt into the resulting resource without parsing it; useful for including configs into a configmap. 284 | my_file.txt will be searched for in the parent directory of the templates dir (most of the time - the k8s-handle project dir): 285 | ```bash 286 | $ ls -1 287 | config.yaml 288 | templates 289 | my_file.txt 290 | ... 291 | ``` 292 | * `{{ list_files('dir/or/glob*') }}` - returns a list of files in the specified directory. Useful for including all files in a folder into a configmap.
You specify the directory path relative to the parent of the templates folder. 293 | > Note: both functions support unix glob, so you can import all files from a directory with `conf.d/*.conf`, for example. 294 | 295 | You can put *.j2 templates in the 'templates' directory and specify them in config.yaml: 296 | ```yaml 297 | testing: 298 | replicas: 1 299 | request_cpu: 100m 300 | request_memory: 128M 301 | some_option: disabled 302 | templates: 303 | - template: my-deployment.yaml.j2 304 | ``` 305 | You can use the same template in every section you want: 306 | ```yaml 307 | staging: 308 | ... 309 | templates: 310 | - template: my-deployment.yaml.j2 311 | 312 | production-zone-1: 313 | ... 314 | templates: 315 | - template: my-deployment.yaml.j2 316 | ``` 317 | 318 | You can use regular expressions (not glob) for template selection in TEMPLATES_DIR: 319 | ```yaml 320 | cluster-1: 321 | ... 322 | templates: 323 | - template: dir-1/.* # All files at TEMPLATES_DIR/dir-1 will be recognised as templates and rendered 324 | ``` 325 | 326 | ### Template loader path 327 | k8s-handle uses the jinja2 template engine and initializes it with the base folder specified in the TEMPLATES_DIR env variable. 328 | The Jinja environment considers template paths to be specified relative to its base init directory. 329 | 330 | Therefore, users **must** specify paths in `{% include %}` (and other) blocks relative to the base (TEMPLATES_DIR) folder, not relative to the importing template's location. 331 | 332 | Example 333 | 334 | We have the following templates dir layout: 335 | ``` 336 | templates / 337 | subdirectory / 338 | template_A.yaml 339 | template_B.yaml 340 | ``` 341 | In that scheme, if template_A contains a jinja2 import of template_B, that import statement must be 342 | ``` 343 | {% include "subdirectory/template_B.yaml" %} 344 | ``` 345 | even though the included template lies at the same level as the template where the include is used. 346 | 347 | ### Tags 348 | If you have a large deployment with many separate parts (for example, a main application and a migration job), you may want to deploy them independently. In this case you have two options: 349 | * Use multiple isolated sections (like `production_app`, `production_migration`, etc.) 350 | * Use one section and tag your templates. For example: 351 | ```yaml 352 | production: 353 | templates: 354 | - template: my-job.yaml.j2 355 | tags: migration 356 | - template: my-configmap.yaml.j2 357 | tags: ['app', 'config'] 358 | - template: my-deployment.yaml.j2 359 | tags: 360 | - app 361 | - deployment 362 | - template: my-service.yaml.j2 363 | tags: "app,service" 364 | ``` 365 | Since your templates are tagged, you can use the `--tags`/`--skip-tags` keys for a partial deploy. For example, you can delete only the migration job: 366 | ``` 367 | k8s-handle destroy --section production --tags migration 368 | ``` 369 | Command line keys `--tags` and `--skip-tags` can be specified multiple times, for example: 370 | ``` 371 | k8s-handle deploy --section production --tags=tag1 --tags=tag2 --tags=tag3 372 | ``` 373 | 374 | ### Groups 375 | You can make groups of templates.
For example: 376 | ```yaml 377 | production: 378 | templates: 379 | - group: 380 | - template: my-configmap.yaml.j2 381 | - template: my-deployment.yaml.j2 382 | - template: my-service.yaml.j2 383 | tags: service-one 384 | - group: 385 | - template: my-job.yaml.j2 386 | ``` 387 | It is useful for creating different sets of templates for different environments, or for tagging a bunch of templates at once. 388 | 389 | ## Variables 390 | ### Required parameters 391 | k8s-handle needs several parameters to be set in order to connect to k8s, such as: 392 | * K8S master uri 393 | * K8S CA base64 394 | * K8S token 395 | 396 | Each of these parameters can be set in various ways, in any combination, and they are applied in the following order 397 | (from highest to lowest precedence): 398 | 1. From the command line via the corresponding keys 399 | 2. From the config.yaml section, lowercase, underscore-delimited, e.g. `k8s_master_uri` 400 | 3. From the environment, uppercase, underscore-delimited, e.g. `K8S_MASTER_URI` 401 | 402 | If the --use-kubeconfig flag is used, these explicitly specified parameters are ignored. 403 | 404 | In addition, the `K8S namespace` parameter must also be specified. 405 | k8s-handle uses the namespace specified in the `metadata: namespace` block of a resource. 406 | If it is not present, the default namespace is used, which is evaluated in the following 407 | order (from highest to lowest precedence): 408 | 1. From the config.yaml `k8s_namespace` key 409 | 2. From the kubeconfig `current-context` field, if the --use-kubeconfig flag is used 410 | 3. From the environment `K8S_NAMESPACE` variable 411 | 412 | If the namespace is not specified in the resource spec, and the default namespace is also not specified, 413 | this will lead to a provisioning error. 414 | 415 | One of the common ways is to specify the connection parameters and/or k8s_namespace in the `common` section of your 416 | config.yaml, but you can do it another way if necessary. 417 | 418 | Thus, k8s-handle provides flexible ways to set the required parameters. 419 | 420 | ### Merging with common 421 | All variables defined in `common` will be merged with the deployed section and made available as the context dict during template rendering, 422 | for example: 423 | ```yaml 424 | common: 425 | common_var: common_value 426 | testing: 427 | testing_variable: testing_value 428 | ``` 429 | After the rendering of this template some-file.txt.j2: 430 | ```txt 431 | common_var = {{ common_var }} 432 | testing_variable = {{ testing_variable }} 433 | ``` 434 | the file some-file.txt will be generated with the following content: 435 | ```txt 436 | common_var = common_value 437 | testing_variable = testing_value 438 | ``` 439 | 440 | If a variable is declared both in the `common` section and the selected one, the value from the 441 | selected section will be chosen. 442 | 443 | If a particular variable is a dictionary in both sections (`common` and the selected one), the resulting variable 444 | will contain a merge of these two dictionaries.
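As an illustration of the dictionary case (this example is not from the original docs; the `resources` variable is hypothetical), a config like

```yaml
common:
  resources:            # hypothetical dictionary variable
    request_cpu: 100m
    request_memory: 128M
testing:
  resources:
    request_memory: 256M
```

would be expected to give templates rendered for the `testing` section a `resources` dict containing both keys, with the overlapping `request_memory` key presumably taken from the selected section (by analogy with the rule for scalar variables above), i.e. `request_cpu: 100m` and `request_memory: 256M`.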
445 | 446 | ### Load variables from environment 447 | If you want to use environment variables in your templates (for a docker image tag generated by the build, for example), 448 | you can use the following construction in config.yaml: 449 | ```yaml 450 | common: 451 | image_version: "{{ env='TAG' }}" 452 | ``` 453 | ### Load variables from yaml file 454 | ```yaml 455 | common: 456 | test: "{{ file='include.yaml' }}" 457 | ``` 458 | include.yaml: 459 | ```yaml 460 | - 1 461 | - 2 462 | - 3 463 | ``` 464 | template: 465 | ```text 466 | {{ test[0] }} 467 | {{ test[1] }} 468 | {{ test[2] }} 469 | ``` 470 | After rendering you get: 471 | ```text 472 | 1 473 | 2 474 | 3 475 | ``` 476 | ## How to use in CI/CD 477 | ### Gitlab CI 478 | #### Native integration 479 | Use the Gitlab CI integration with Kubernetes (https://docs.gitlab.com/ee/user/project/clusters/index.html#adding-an-existing-kubernetes-cluster). 480 | .gitlab-ci.yaml: 481 | ```yaml 482 | deploy: 483 | image: 2gis/k8s-handle:latest 484 | script: 485 | - k8s-handle deploy --section --use-kubeconfig 486 | ``` 487 | #### Through variables 488 | Alternatively, you can set up Gitlab CI variables: 489 | * K8S_TOKEN_STAGING = < serviceaccount token for staging > 490 | * K8S_TOKEN_PRODUCTION = < serviceaccount token for production > 491 | > Don't forget to mark the variables as protected 492 | 493 | then add the following lines to config.yaml: 494 | ```yaml 495 | staging: 496 | k8s_master_uri: 497 | k8s_token: "{{ env='K8S_TOKEN_STAGING' }}" 498 | k8s_ca_base64: 499 | 500 | production: 501 | k8s_master_uri: 502 | k8s_token: "{{ env='K8S_TOKEN_PRODUCTION' }}" 503 | k8s_ca_base64: 504 | ``` 505 | Now just run the proper gitlab job (without the --use-kubeconfig option): 506 | ```yaml 507 | deploy: 508 | image: 2gis/k8s-handle:latest 509 | script: 510 | - k8s-handle deploy --section 511 | ``` 512 | ## Working modes 513 | ### Sync mode 514 | > Works only with Deployment, Job, StatefulSet and DaemonSet 515 | 516 | By default k8s-handle just applies resources to kubernetes and exits. In sync mode k8s-handle waits for the resources to be up and 517 | running: 518 | ```bash 519 | $ k8s-handle deploy --section staging --sync-mode 520 | ... 521 | INFO:k8s.resource:Deployment "k8s-starter-kit" already exists, replace it 522 | INFO:k8s.resource:desiredReplicas = 3, updatedReplicas = 3, availableReplicas = 1 523 | INFO:k8s.resource:Deployment not completed on 1 attempt, next attempt in 5 sec. 524 | INFO:k8s.resource:desiredReplicas = 3, updatedReplicas = 3, availableReplicas = 2 525 | INFO:k8s.resource:Deployment not completed on 2 attempt, next attempt in 5 sec. 526 | INFO:k8s.resource:desiredReplicas = 3, updatedReplicas = 3, availableReplicas = 3 527 | INFO:k8s.resource:Deployment completed on 3 attempt 528 | ... 529 | ``` 530 | You can specify the number of tries before k8s-handle exits with a non-zero exit code, and the delay between checks: 531 | ```bash 532 | --tries (360 by default) 533 | --retry-delay (5 by default) 534 | ``` 535 | ### Strict mode 536 | In some cases k8s-handle warns you about ambiguous situations and keeps working. With `--strict` mode, k8s-handle warns and exits 537 | with a non-zero code, for example when some used environment variable is empty. 538 | ```bash 539 | $ k8s-handle deploy -s staging --use-kubeconfig --strict 540 | ERROR:__main__:RuntimeError: Environment variable "IMAGE_VERSION" is not set 541 | $ echo $?
542 | 1 543 | ``` 544 | ### Destroy 545 | In some cases you need to destroy previously created resources (demo envs, deploys from git branches, testing, etc.); k8s-handle 546 | supports the `destroy` subcommand for this. Just use `destroy` instead of `deploy`. k8s-handle processes a destroy the same way as a deploy, but 547 | issues delete calls to the Kubernetes API instead of create or replace. 548 | > Sync mode is available for destroy as well. 549 | 550 | ### Diff 551 | You can get a diff between the objects in the Kubernetes API and the local working copy of the configuration. 552 | ```bash 553 | $ k8s-handle diff -s
--use-kubeconfig 554 | ``` 555 | > Secrets are ignored for security reasons 556 | 557 | ## Operating without config.yaml 558 | The most common way for most use cases is to operate k8s-handle via `config.yaml`, specifying 559 | connection parameters, targets (sections and tags) and variables in one file. The deploy command that runs after that 560 | will first trigger the templating process: filling your spec templates with variables and creating resource spec files. 561 | Those files then become targets for the provisioner module, which attempts to create the K8S resources. 562 | 563 | But in some cases, such as the intention to use your own templating engine or the necessity to make specs 564 | beforehand and to deploy them separately later, there may be a need to divide the process into separate steps: 565 | 1. Templating 566 | 2. Direct, `kubectl apply`-like provisioning without the config.yaml context. 567 | 568 | For this reason, the `k8s-handle render`, `k8s-handle apply` and `k8s-handle delete` commands are implemented. 569 | 570 | ### Render 571 | 572 | The `render` command is intended for creating specs from templates without their subsequent deployment. 573 | 574 | Another purpose is to check the generation of the templates: previously, this functionality was achieved by using the 575 | optional `--dry-run` flag. Support for `--dry-run` in the `deploy` and `destroy` commands remains at this time for the 576 | sake of backward compatibility, but its further usage is **discouraged**. 577 | 578 | Just like with the `deploy` command, the `-s/--section` and `--tags`/`--skip-tags` targeting options are provided to make it 579 | handy to render several specs. Connection parameters do not need to be specified because no k8s cluster availability 580 | checks are performed. 581 | 582 | The templates directory path is taken from the env variable `TEMPLATES_DIR` and is equal to 'templates' by default. 583 | Resources generated by this command can be found in the directory set in the `TEMP_DIR` env variable, 584 | with the default value '/tmp/k8s-handle'. Users that want to preserve generated templates might need to change this default 585 | to avoid loss of the generated resources. 586 | 587 | ``` 588 | TEMP_DIR="/home/custom_dir" k8s-handle render -s staging 589 | 2019-02-15 14:44:44 INFO:k8s_handle.templating:Trying to generate file from template "service.yaml.j2" in "/home/custom_dir" 590 | 2019-02-15 14:44:44 INFO:k8s_handle.templating:File "/home/custom_dir/service.yaml" successfully generated 591 | ``` 592 | 593 | ### Apply 594 | 595 | The `apply` command with the required `-r/--resource` flag starts the process of provisioning a separate resource 596 | spec to k8s. 597 | 598 | The value of the `-r` key is treated as an absolute path if it starts with a slash. Otherwise, it's treated as a 599 | path relative to the directory specified in the `TEMP_DIR` env variable. 600 | 601 | No config.yaml-like file is required (and it is not taken into account even if it exists). The connection parameters can be set 602 | via the `--use-kubeconfig` mode, which is available and the handiest, or via the CLI/env flags and variables. 603 | Options related to output and syncing, like `--sync-mode`, `--tries` and `--show-logs`, are available as well.
604 | 605 | ``` 606 | $ k8s-handle apply -r /tmp/k8s-handle/service.yaml --use-kubeconfig 607 | 2019-02-15 14:22:58 INFO:k8s_handle:Default namespace "test" 608 | 2019-02-15 14:22:58 INFO:k8s_handle.k8s.resource:Using namespace "test" 609 | 2019-02-15 14:22:58 INFO:k8s_handle.k8s.resource:Service "k8s-handle-example" does not exist, create it 610 | 611 | ``` 612 | 613 | ### Delete 614 | `delete` command with the `-r/--resource` required flag acts similarly to `destroy` command and does a try to delete 615 | the directly specified resource from k8s if any. 616 | 617 | ``` 618 | $ k8s-handle delete -r service.yaml --use-kubeconfig 619 | 620 | 2019-02-15 14:24:06 INFO:k8s_handle:Default namespace "test" 621 | 2019-02-15 14:24:06 INFO:k8s_handle.k8s.resource:Using namespace "test" 622 | 2019-02-15 14:24:06 INFO:k8s_handle.k8s.resource:Trying to delete Service "k8s-handle-example" 623 | 2019-02-15 14:24:06 INFO:k8s_handle.k8s.resource:Service "k8s-handle-example" deleted 624 | ``` 625 | 626 | ### Custom resource definitions and custom resources 627 | Since version 0.5.5 k8s-handle supports Custom resource definition (CRD) and custom resource (CR) kinds. 628 | If your deployment involves use of such kinds, make sure that CRD was deployed before CR and check correctness of the CRD's scope. 629 | -------------------------------------------------------------------------------- /_config.yml: -------------------------------------------------------------------------------- 1 | theme: jekyll-theme-cayman -------------------------------------------------------------------------------- /helmVsK8s-handle.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/2gis/k8s-handle/144fab41f5c59f458333a9e50777641b96b651de/helmVsK8s-handle.png -------------------------------------------------------------------------------- /k8s_handle/__init__.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import logging 3 | import os 4 | import sys 5 | 6 | from kubernetes import client 7 | from kubernetes.config import list_kube_config_contexts, load_kube_config 8 | 9 | from k8s_handle import config 10 | from k8s_handle import settings 11 | from k8s_handle import templating 12 | from k8s_handle.exceptions import ProvisioningError, ResourceNotAvailableError 13 | from k8s_handle.filesystem import InvalidYamlError 14 | from k8s_handle.k8s.provisioner import Provisioner 15 | from k8s_handle.k8s.diff import Diff 16 | 17 | COMMAND_DEPLOY = 'deploy' 18 | COMMAND_DIFF = 'diff' 19 | COMMAND_DESTROY = 'destroy' 20 | 21 | log = logging.getLogger(__name__) 22 | logging.basicConfig(level=settings.LOG_LEVEL, format=settings.LOG_FORMAT, datefmt=settings.LOG_DATE_FORMAT) 23 | 24 | 25 | def handler_deploy(args): 26 | _handler_deploy_destroy(args, COMMAND_DEPLOY) 27 | 28 | 29 | def handler_destroy(args): 30 | _handler_deploy_destroy(args, COMMAND_DESTROY) 31 | 32 | 33 | def handler_apply(args): 34 | _handler_apply_delete(args, COMMAND_DEPLOY) 35 | 36 | 37 | def handler_delete(args): 38 | _handler_apply_delete(args, COMMAND_DESTROY) 39 | 40 | 41 | def handler_render(args): 42 | context = config.load_context_section(args.get('section')) 43 | templating.Renderer( 44 | settings.TEMPLATES_DIR, 45 | args.get('tags'), 46 | args.get('skip_tags') 47 | ).generate_by_context(context) 48 | 49 | 50 | def handler_diff(args): 51 | _handler_deploy_destroy(args, COMMAND_DIFF) 52 | 53 | 54 | def _handler_deploy_destroy(args, command): 55 | 
context = config.load_context_section(args.get('section')) 56 | resources = templating.Renderer( 57 | settings.TEMPLATES_DIR, 58 | args.get('tags'), 59 | args.get('skip_tags') 60 | ).generate_by_context(context) 61 | 62 | if args.get('dry_run'): 63 | return 64 | 65 | _handler_provision( 66 | command, 67 | resources, 68 | config.PriorityEvaluator(args, context, os.environ), 69 | args.get('use_kubeconfig'), 70 | args.get('sync_mode'), 71 | args.get('show_logs') 72 | ) 73 | 74 | 75 | def _handler_apply_delete(args, command): 76 | _handler_provision( 77 | command, 78 | [os.path.join(settings.TEMP_DIR, args.get('resource'))], 79 | config.PriorityEvaluator(args, {}, os.environ), 80 | args.get('use_kubeconfig'), 81 | args.get('sync_mode'), 82 | args.get('show_logs') 83 | ) 84 | 85 | 86 | def _handler_provision(command, resources, priority_evaluator, use_kubeconfig, sync_mode, show_logs): 87 | kubeconfig_namespace = None 88 | 89 | if priority_evaluator.environment_deprecated(): 90 | log.warning("K8S_HOST and K8S_CA environment variables support is deprecated " 91 | "and will be discontinued in the future. Use K8S_MASTER_URI and K8S_CA_BASE64 instead.") 92 | 93 | # INFO rvadim: https://github.com/kubernetes-client/python/issues/430#issuecomment-359483997 94 | if use_kubeconfig: 95 | try: 96 | load_kube_config() 97 | kubeconfig_namespace = list_kube_config_contexts()[1].get('context').get('namespace') 98 | except Exception as e: 99 | raise RuntimeError(e) 100 | else: 101 | client.Configuration.set_default(priority_evaluator.k8s_client_configuration()) 102 | 103 | settings.K8S_NAMESPACE = priority_evaluator.k8s_namespace_default(kubeconfig_namespace) 104 | log.info('Default namespace "{}"'.format(settings.K8S_NAMESPACE)) 105 | 106 | if not settings.K8S_NAMESPACE: 107 | log.info("Default namespace is not set. " 108 | "This may lead to provisioning error, if namespace is not set for each resource.") 109 | 110 | if command == COMMAND_DIFF: 111 | executor = Diff() 112 | else: 113 | executor = Provisioner(command, sync_mode, show_logs) 114 | 115 | for resource in resources: 116 | executor.run(resource) 117 | 118 | 119 | parser = argparse.ArgumentParser(description='CLI utility generate k8s resources by templates and apply it to cluster') 120 | subparsers = parser.add_subparsers(dest="command") 121 | subparsers.required = True 122 | 123 | parser_target_config = argparse.ArgumentParser(add_help=False) 124 | parser_target_config.add_argument('-s', '--section', required=True, type=str, help='Section to deploy from config file') 125 | parser_target_config.add_argument('-c', '--config', required=False, help='Config file, default: config.yaml') 126 | parser_target_config.add_argument('--tags', action='append', required=False, 127 | help='Only use templates tagged with these values') 128 | parser_target_config.add_argument('--skip-tags', action='append', required=False, 129 | help='Only use templates whose tags do not match these values') 130 | 131 | parser_target_resource = argparse.ArgumentParser(add_help=False) 132 | parser_target_resource.add_argument('-r', '--resource', required=True, type=str, 133 | help='Resource spec path, absolute (started with slash) or relative from TEMP_DIR') 134 | 135 | parser_deprecated = argparse.ArgumentParser(add_help=False) 136 | parser_deprecated.add_argument('--dry-run', required=False, action='store_true', 137 | help='Don\'t run kubectl commands. 
Deprecated, use "k8s-handle template" instead') 138 | 139 | parser_provisioning = argparse.ArgumentParser(add_help=False) 140 | parser_provisioning.add_argument('--sync-mode', action='store_true', required=False, default=False, 141 | help='Turn on sync mode and wait deployment ending') 142 | parser_provisioning.add_argument('--tries', type=int, required=False, default=360, 143 | help='Count of tries to check deployment status') 144 | parser_provisioning.add_argument('--retry-delay', type=int, required=False, default=5, 145 | help='Sleep between tries in seconds') 146 | parser_provisioning.add_argument('--strict', action='store_true', required=False, 147 | help='Check existence of all env variables in config.yaml and stop if var is not set') 148 | parser_provisioning.add_argument('--use-kubeconfig', action='store_true', required=False, 149 | help='Try to use kube config') 150 | parser_provisioning.add_argument('--k8s-handle-debug', action='store_true', required=False, 151 | help='Show K8S client debug messages') 152 | 153 | parser_logs = argparse.ArgumentParser(add_help=False) 154 | parser_logs.add_argument('--show-logs', action='store_true', required=False, default=False, help='Show logs for jobs') 155 | parser_logs.add_argument('--tail-lines', type=int, required=False, help='Lines of recent log file to display') 156 | 157 | arguments_connection = parser_provisioning.add_argument_group() 158 | arguments_connection.add_argument('--k8s-master-uri', required=False, help='K8S master to connect to') 159 | arguments_connection.add_argument('--k8s-ca-base64', required=False, help='base64-encoded K8S certificate authority') 160 | arguments_connection.add_argument('--k8s-token', required=False, help='K8S token to use') 161 | 162 | parser_deploy = subparsers.add_parser( 163 | 'deploy', 164 | parents=[parser_provisioning, parser_target_config, parser_logs, parser_deprecated], 165 | help='Do attempt to create specs from templates and deploy K8S resources of the selected section') 166 | parser_deploy.set_defaults(func=handler_deploy) 167 | 168 | parser_apply = subparsers.add_parser('apply', parents=[parser_provisioning, parser_target_resource, parser_logs], 169 | help='Do attempt to deploy K8S resource from the existing spec') 170 | parser_apply.set_defaults(func=handler_apply) 171 | 172 | parser_destroy = subparsers.add_parser('destroy', 173 | parents=[parser_provisioning, parser_target_config, parser_deprecated], 174 | help='Do attempt to destroy K8S resources of the selected section') 175 | parser_destroy.set_defaults(func=handler_destroy) 176 | 177 | parser_delete = subparsers.add_parser('delete', parents=[parser_provisioning, parser_target_resource], 178 | help='Do attempt to destroy K8S resource from the existing spec') 179 | parser_delete.set_defaults(func=handler_delete) 180 | 181 | parser_template = subparsers.add_parser('render', parents=[parser_target_config], 182 | help='Make resources from the template and config. 
' 183 | 'Created resources will be placed into the TEMP_DIR') 184 | parser_template.set_defaults(func=handler_render) 185 | 186 | parser_diff = subparsers.add_parser('diff', parents=[parser_target_config], 187 | help='Show diff between current rendered yamls and apiserver yamls') 188 | parser_diff.add_argument('--use-kubeconfig', action='store_true', required=False, 189 | help='Try to use kube config') 190 | parser_diff.set_defaults(func=handler_diff) 191 | 192 | 193 | def main(): 194 | # INFO furiousassault: backward compatibility rough attempt 195 | # must be removed later according to https://github.com/2gis/k8s-handle/issues/40 196 | deprecation_warnings = 0 197 | filtered_arguments = [] 198 | 199 | for argument in sys.argv[1:]: 200 | if argument in ['--sync-mode=true', '--sync-mode=True', '--dry-run=true', '--dry-run=True']: 201 | deprecation_warnings += 1 202 | filtered_arguments.append(argument.split('=')[0]) 203 | continue 204 | 205 | if argument in ['--sync-mode=false', '--sync-mode=False', '--dry-run=false', '--dry-run=False']: 206 | deprecation_warnings += 1 207 | continue 208 | 209 | filtered_arguments.append(argument) 210 | 211 | args, unrecognized_args = parser.parse_known_args(filtered_arguments) 212 | 213 | if deprecation_warnings or unrecognized_args: 214 | log.warning("Explicit true/false arguments to --sync-mode and --dry-run keys are deprecated " 215 | "and will be discontinued in the future. Use these keys without arguments instead.") 216 | 217 | args_dict = vars(args) 218 | settings.CHECK_STATUS_TRIES = args_dict.get('tries') 219 | settings.CHECK_DAEMONSET_STATUS_TRIES = args_dict.get('tries') 220 | settings.CHECK_STATUS_TIMEOUT = args_dict.get('retry_delay') 221 | settings.CHECK_DAEMONSET_STATUS_TIMEOUT = args_dict.get('retry_delay') 222 | settings.GET_ENVIRON_STRICT = args_dict.get('strict') 223 | settings.COUNT_LOG_LINES = args_dict.get('tail_lines') 224 | settings.CONFIG_FILE = args_dict.get('config') or settings.CONFIG_FILE 225 | 226 | try: 227 | args.func(args_dict) 228 | except templating.TemplateRenderingError as e: 229 | log.error('Template generation error: {}'.format(e)) 230 | sys.exit(1) 231 | except InvalidYamlError as e: 232 | log.error('{}'.format(e)) 233 | sys.exit(1) 234 | except RuntimeError as e: 235 | log.error('RuntimeError: {}'.format(e)) 236 | sys.exit(1) 237 | except ResourceNotAvailableError as e: 238 | log.error('Resource not available: {}'.format(e)) 239 | sys.exit(1) 240 | except ProvisioningError: 241 | sys.exit(1) 242 | 243 | print(r''' 244 | _(_)_ wWWWw _ 245 | @@@@ (_)@(_) vVVVv _ @@@@ (___) _(_)_ 246 | @@()@@ wWWWw (_)\ (___) _(_)_ @@()@@ Y (_)@(_) 247 | @@@@ (___) `|/ Y (_)@(_) @@@@ \|/ (_) 248 | / Y \| \|/ /(_) \| |/ | 249 | \ | \ |/ | / \ | / \|/ |/ \| \|/ 250 | \|// \|/// \|// \|/// \|/// \|// |// \|// 251 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^''') 252 | -------------------------------------------------------------------------------- /k8s_handle/__main__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from . 
import main 4 | 5 | if __name__ == '__main__': 6 | main() 7 | -------------------------------------------------------------------------------- /k8s_handle/config.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import logging 3 | import os 4 | import re 5 | 6 | from kubernetes import client 7 | 8 | from k8s_handle import settings 9 | from k8s_handle.dictionary import merge 10 | from k8s_handle.filesystem import load_yaml, write_file_tmp 11 | from k8s_handle.templating import b64decode 12 | 13 | log = logging.getLogger(__name__) 14 | 15 | INCLUDE_RE = re.compile(r'{{\s?file\s?=\s?\'(?P<file>[^\']*)\'\s?}}') 16 | CUSTOM_ENV_RE = r'{{\s*env\s*=\s*\'([^\']*)\'\s*}}' 17 | 18 | KEY_USE_KUBECONFIG = 'use_kubeconfig' 19 | KEY_K8S_MASTER_URI = 'k8s_master_uri' 20 | KEY_K8S_MASTER_URI_ENV = KEY_K8S_MASTER_URI.upper() 21 | KEY_K8S_MASTER_URI_ENV_DEPRECATED = 'K8S_HOST' 22 | 23 | KEY_K8S_CA_BASE64 = 'k8s_ca_base64' 24 | KEY_K8S_CA_BASE64_ENV = KEY_K8S_CA_BASE64.upper() 25 | KEY_K8S_CA_BASE64_URI_ENV_DEPRECATED = 'K8S_CA' 26 | 27 | KEY_K8S_TOKEN = 'k8s_token' 28 | KEY_K8S_TOKEN_ENV = KEY_K8S_TOKEN.upper() 29 | 30 | KEY_K8S_NAMESPACE = 'k8s_namespace' 31 | KEY_K8S_NAMESPACE_ENV = KEY_K8S_NAMESPACE.upper() 32 | KEY_K8S_HANDLE_DEBUG = 'k8s_handle_debug' 33 | 34 | 35 | class PriorityEvaluator: 36 | def __init__(self, cli_arguments, context_arguments, environment): 37 | self.cli_arguments = cli_arguments 38 | self.context_arguments = context_arguments 39 | self.environment = environment 40 | self.loaded = False 41 | 42 | def k8s_namespace_default(self, kubeconfig_namespace=None): 43 | return PriorityEvaluator._first( 44 | self.context_arguments.get(KEY_K8S_NAMESPACE), 45 | kubeconfig_namespace, 46 | self.environment.get(KEY_K8S_NAMESPACE_ENV)) 47 | 48 | def k8s_client_configuration(self): 49 | for parameter, value in { 50 | KEY_K8S_MASTER_URI: self._k8s_master_uri(), 51 | KEY_K8S_TOKEN: self._k8s_token() 52 | }.items(): 53 | if value: 54 | continue 55 | 56 | raise RuntimeError( 57 | '{0} parameter is not set. 
Please, provide {0} via CLI, config or env.'.format(parameter)) 58 | 59 | configuration = client.Configuration() 60 | configuration.host = self._k8s_master_uri() 61 | 62 | if self._k8s_ca_base64(): 63 | configuration.ssl_ca_cert = write_file_tmp(b64decode(self._k8s_ca_base64()).encode('utf-8')) 64 | 65 | configuration.api_key = {"authorization": "Bearer " + self._k8s_token()} 66 | configuration.debug = self._k8s_handle_debug() 67 | return configuration 68 | 69 | def environment_deprecated(self): 70 | return self.environment.get(KEY_K8S_MASTER_URI_ENV_DEPRECATED) or \ 71 | self.environment.get(KEY_K8S_CA_BASE64_URI_ENV_DEPRECATED) 72 | 73 | def _k8s_master_uri(self): 74 | return PriorityEvaluator._first( 75 | self.cli_arguments.get(KEY_K8S_MASTER_URI), 76 | self.context_arguments.get(KEY_K8S_MASTER_URI), 77 | self.environment.get(KEY_K8S_MASTER_URI_ENV), 78 | self.environment.get(KEY_K8S_MASTER_URI_ENV_DEPRECATED)) 79 | 80 | def _k8s_ca_base64(self): 81 | return PriorityEvaluator._first( 82 | self.cli_arguments.get(KEY_K8S_CA_BASE64), 83 | self.context_arguments.get(KEY_K8S_CA_BASE64), 84 | self.environment.get(KEY_K8S_CA_BASE64_ENV), 85 | self.environment.get(KEY_K8S_CA_BASE64_URI_ENV_DEPRECATED)) 86 | 87 | def _k8s_token(self): 88 | return PriorityEvaluator._first( 89 | self.cli_arguments.get(KEY_K8S_TOKEN), 90 | self.context_arguments.get(KEY_K8S_TOKEN), 91 | self.environment.get(KEY_K8S_TOKEN_ENV)) 92 | 93 | def _k8s_handle_debug(self): 94 | return PriorityEvaluator._first( 95 | self.cli_arguments.get(KEY_K8S_HANDLE_DEBUG), 96 | self.context_arguments.get(KEY_K8S_HANDLE_DEBUG) in [True, 'true', 'True']) 97 | 98 | @staticmethod 99 | def _first(*arguments): 100 | if not arguments: 101 | return None 102 | 103 | for argument in arguments: 104 | if not argument: 105 | continue 106 | 107 | return argument 108 | 109 | return None 110 | 111 | 112 | def _process_variable(variable): 113 | matches = INCLUDE_RE.match(variable) 114 | 115 | if matches: 116 | return load_yaml(matches.groupdict().get('file')) 117 | 118 | try: 119 | return re.sub(CUSTOM_ENV_RE, lambda m: os.environ[m.group(1)], variable) 120 | 121 | except KeyError as err: 122 | log.debug('Environment variable "{}" is not set'.format(err.args[0])) 123 | if settings.GET_ENVIRON_STRICT: 124 | raise RuntimeError('Environment variable "{}" is not set'.format(err.args[0])) 125 | 126 | return re.sub(CUSTOM_ENV_RE, lambda m: os.environ.get(m.group(1), ''), variable) 127 | 128 | 129 | def _update_single_variable(value, include_history): 130 | if value in include_history: 131 | raise RuntimeError('Infinite include loop') 132 | 133 | local_history = copy.copy(include_history) 134 | local_history.append(value) 135 | 136 | return _update_context_recursively(_process_variable(value), local_history) 137 | 138 | 139 | def _update_context_recursively(context, include_history=[]): 140 | if isinstance(context, dict): 141 | output = {} 142 | for key, value in context.items(): 143 | if isinstance(value, str): 144 | output[key] = _update_single_variable(value, include_history) 145 | else: 146 | output[key] = _update_context_recursively(value) 147 | return output 148 | elif isinstance(context, list): 149 | output = [] 150 | for value in context: 151 | if isinstance(value, str): 152 | output.append(_update_single_variable(value, include_history)) 153 | else: 154 | output.append(_update_context_recursively(value)) 155 | return output 156 | else: 157 | return context 158 | 159 | 160 | def load_context_section(section): 161 | if not section: 162 | raise 
RuntimeError('Empty section specification is not allowed') 163 | 164 | if section == settings.COMMON_SECTION_NAME: 165 | raise RuntimeError('Section "{}" is not intended to deploy'.format(settings.COMMON_SECTION_NAME)) 166 | 167 | context = load_yaml(settings.CONFIG_FILE) 168 | 169 | if context is None: 170 | raise RuntimeError('Config file "{}" is empty'.format(settings.CONFIG_FILE)) 171 | 172 | if section not in context: 173 | raise RuntimeError('Section "{}" not found in config file "{}"'.format(section, settings.CONFIG_FILE)) 174 | 175 | # delete all sections except common and used section 176 | context.setdefault(settings.COMMON_SECTION_NAME, {}) 177 | context = {key: context[key] for key in [settings.COMMON_SECTION_NAME, section]} 178 | context = _update_context_recursively(context) 179 | 180 | if section and section in context: 181 | context = merge(context[settings.COMMON_SECTION_NAME], context[section]) 182 | 183 | if 'templates' not in context and 'kubectl' not in context: 184 | raise RuntimeError( 185 | 'Section "templates" or "kubectl" not found in config file "{}"'.format(settings.CONFIG_FILE)) 186 | 187 | validate_dashes(context) 188 | return context 189 | 190 | 191 | def get_all_root_keys(result, d): 192 | for key, value in d.items(): 193 | result.append(key) 194 | 195 | return result 196 | 197 | 198 | def get_vars_with_dashes(vars_list): 199 | return [var_name for var_name in vars_list if '-' in var_name] 200 | 201 | 202 | def validate_dashes(context): 203 | all_keys = get_all_root_keys([], context) 204 | dashes = get_vars_with_dashes(all_keys) 205 | if len(dashes) != 0: 206 | raise RuntimeError('Root variable names should never include dashes, ' 207 | 'check your vars please: {}'.format(', '.join(sorted(dashes)))) 208 | -------------------------------------------------------------------------------- /k8s_handle/dictionary.py: -------------------------------------------------------------------------------- 1 | import copy 2 | 3 | 4 | def merge(dict_x, dict_y): 5 | result = copy.deepcopy(dict_x) 6 | for key, value in dict_y.items(): 7 | if isinstance(value, dict) and isinstance(result.get(key), dict): 8 | result[key] = merge(result[key], value) 9 | continue 10 | 11 | result[key] = value 12 | 13 | return result 14 | -------------------------------------------------------------------------------- /k8s_handle/exceptions.py: -------------------------------------------------------------------------------- 1 | class ProvisioningError(Exception): 2 | pass 3 | 4 | 5 | class ResourceNotAvailableError(Exception): 6 | pass 7 | 8 | 9 | class InvalidWarningHeader(Exception): 10 | pass 11 | 12 | 13 | class InvalidYamlError(Exception): 14 | pass 15 | 16 | 17 | class TemplateRenderingError(Exception): 18 | pass 19 | -------------------------------------------------------------------------------- /k8s_handle/filesystem.py: -------------------------------------------------------------------------------- 1 | import atexit 2 | import logging 3 | import os 4 | import tempfile 5 | 6 | import yaml 7 | 8 | from k8s_handle.exceptions import InvalidYamlError 9 | 10 | # furiousassault RE: it's not a good practice to log from utility function 11 | # maybe we should pass os.remove failure silently, it doesn't seem so important 12 | log = logging.getLogger(__name__) 13 | 14 | 15 | def load_yaml(path): 16 | try: 17 | with open(path) as f: 18 | return yaml.safe_load(f.read()) 19 | except Exception as e: 20 | raise InvalidYamlError("file '{}' doesn't contain valid yaml: {}".format( 21 | path, e)) 22 | 23 | 
24 | def write_file_tmp(data): 25 | def remove_file(file_path): 26 | try: 27 | os.remove(file_path) 28 | except Exception as e: 29 | log.warning('Unable to remove "{}", due to "{}"'.format(file_path, e)) 30 | 31 | f = tempfile.NamedTemporaryFile(delete=False) 32 | f.write(data) 33 | f.flush() 34 | atexit.register(remove_file, f.name) 35 | return f.name 36 | -------------------------------------------------------------------------------- /k8s_handle/k8s/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/2gis/k8s-handle/144fab41f5c59f458333a9e50777641b96b651de/k8s_handle/k8s/__init__.py -------------------------------------------------------------------------------- /k8s_handle/k8s/adapters.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from time import sleep 3 | 4 | from kubernetes import client 5 | from kubernetes.client.rest import ApiException 6 | 7 | from k8s_handle import settings 8 | from k8s_handle.exceptions import ProvisioningError 9 | from k8s_handle.transforms import add_indent, split_str_by_capital_letters 10 | from .api_extensions import ResourcesAPI 11 | from .api_clients import ApiClientWithWarningHandler 12 | from .mocks import K8sClientMock 13 | 14 | log = logging.getLogger(__name__) 15 | 16 | RE_CREATE_TRIES = 10 17 | RE_CREATE_TIMEOUT = 1 18 | 19 | 20 | class Adapter: 21 | api_versions = { 22 | 'v1': client.CoreV1Api, 23 | 'batch/v1': client.BatchV1Api, 24 | 'policy/v1': client.PolicyV1Api, 25 | 'storage.k8s.io/v1': client.StorageV1Api, 26 | 'apps/v1': client.AppsV1Api, 27 | 'autoscaling/v1': client.AutoscalingV1Api, 28 | 'autoscaling/v2': client.AutoscalingV2Api, 29 | 'rbac.authorization.k8s.io/v1': client.RbacAuthorizationV1Api, 30 | 'scheduling.k8s.io/v1': client.SchedulingV1Api, 31 | 'networking.k8s.io/v1': client.NetworkingV1Api, 32 | 'apiextensions.k8s.io/v1': client.ApiextensionsV1Api, 33 | } 34 | kinds_builtin = [ 35 | 'ConfigMap', 'CronJob', 'DaemonSet', 'Deployment', 'Endpoints', 36 | 'Job', 'Namespace', 'PodDisruptionBudget', 'ResourceQuota', 37 | 'Secret', 'Service', 'ServiceAccount', 'StatefulSet', 'StorageClass', 38 | 'PersistentVolume', 'PersistentVolumeClaim', 'HorizontalPodAutoscaler', 39 | 'Role', 'RoleBinding', 'ClusterRole', 'ClusterRoleBinding', 'CustomResourceDefinition', 40 | 'PriorityClass', 'PodSecurityPolicy', 'LimitRange', 'NetworkPolicy' 41 | ] 42 | 43 | def __init__(self, spec): 44 | self.body = spec 45 | self.kind = spec.get('kind', "") 46 | self.name = spec.get('metadata', {}).get('name') 47 | self.namespace = spec.get('metadata', {}).get('namespace', "") or settings.K8S_NAMESPACE 48 | 49 | @staticmethod 50 | def get_instance(spec, api_custom_objects=None, api_resources=None, warning_handler=None): 51 | api_client = ApiClientWithWarningHandler(warning_handler=warning_handler) 52 | 53 | # due to https://github.com/kubernetes-client/python/issues/387 54 | if spec.get('kind') in Adapter.kinds_builtin: 55 | if spec.get('apiVersion') == 'test/test': 56 | return AdapterBuiltinKind(spec, K8sClientMock(spec.get('metadata', {}).get('name'))) 57 | 58 | api = Adapter.api_versions.get(spec.get('apiVersion')) 59 | 60 | if not api: 61 | return None 62 | 63 | return AdapterBuiltinKind(spec, api(api_client=api_client)) 64 | 65 | api_custom_objects = api_custom_objects or client.CustomObjectsApi(api_client=api_client) 66 | api_resources = api_resources or ResourcesAPI(api_client=api_client) 67 | return 
AdapterCustomKind(spec, api_custom_objects, api_resources) 68 | 69 | 70 | class AdapterBuiltinKind(Adapter): 71 | def __init__(self, spec, api=None): 72 | super().__init__(spec) 73 | self.kind = split_str_by_capital_letters(spec['kind']) 74 | self.replicas = spec.get('spec', {}).get('replicas') 75 | self.api = api 76 | 77 | def get(self): 78 | try: 79 | if hasattr(self.api, "read_namespaced_{}".format(self.kind)): 80 | response = getattr(self.api, 'read_namespaced_{}'.format(self.kind))( 81 | self.name, namespace=self.namespace) 82 | else: 83 | response = getattr(self.api, 'read_{}'.format(self.kind))(self.name) 84 | except ApiException as e: 85 | if e.reason == 'Not Found': 86 | return None 87 | log.error('Exception when calling "read_namespaced_{}": {}'.format(self.kind, add_indent(e.body))) 88 | raise ProvisioningError(e) 89 | 90 | return response 91 | 92 | def get_pods_by_selector(self, label_selector): 93 | try: 94 | if not isinstance(self.api, K8sClientMock): 95 | self.api = client.CoreV1Api() 96 | 97 | return self.api.list_namespaced_pod( 98 | namespace=self.namespace, label_selector='job-name={}'.format(label_selector)) 99 | 100 | except ApiException as e: 101 | log.error('Exception when calling CoreV1Api->list_namespaced_pod: {}', e) 102 | raise e 103 | 104 | def read_pod_status(self, name): 105 | try: 106 | if not isinstance(self.api, K8sClientMock): 107 | self.api = client.CoreV1Api() 108 | 109 | return self.api.read_namespaced_pod_status(name, namespace=self.namespace) 110 | except ApiException as e: 111 | log.error('Exception when calling CoreV1Api->read_namespaced_pod_status: {}', e) 112 | raise e 113 | 114 | def read_pod_logs(self, name, container): 115 | log.info('Read logs for pod "{}", container "{}"'.format(name, container)) 116 | try: 117 | if not isinstance(self.api, K8sClientMock): 118 | self.api = client.CoreV1Api() 119 | if settings.COUNT_LOG_LINES: 120 | return self.api.read_namespaced_pod_log(name, namespace=self.namespace, timestamps=True, 121 | tail_lines=settings.COUNT_LOG_LINES, container=container) 122 | return self.api.read_namespaced_pod_log(name, namespace=self.namespace, timestamps=True, 123 | container=container) 124 | except ApiException as e: 125 | log.error('Exception when calling CoreV1Api->read_namespaced_pod_log: {}', e) 126 | raise e 127 | 128 | def create(self): 129 | try: 130 | if hasattr(self.api, "create_namespaced_{}".format(self.kind)): 131 | return getattr(self.api, 'create_namespaced_{}'.format(self.kind))( 132 | body=self.body, namespace=self.namespace) 133 | 134 | return getattr(self.api, 'create_{}'.format(self.kind))(body=self.body) 135 | except ApiException as e: 136 | log.error('Exception when calling "create_namespaced_{}": {}'.format(self.kind, add_indent(e.body))) 137 | raise ProvisioningError(e) 138 | except ValueError as e: 139 | log.error(e) 140 | # WORKAROUND: 141 | # - https://github.com/kubernetes-client/gen/issues/52 142 | # - https://github.com/kubernetes-client/python/issues/1098 143 | if self.kind not in ['custom_resource_definition', 'horizontal_pod_autoscaler']: 144 | raise e 145 | 146 | def replace(self, parameters): 147 | try: 148 | if self.kind in ['service', 'custom_resource_definition', 'pod_disruption_budget']: 149 | if 'resourceVersion' in parameters: 150 | self.body['metadata']['resourceVersion'] = parameters['resourceVersion'] 151 | 152 | if self.kind in ['service']: 153 | if 'clusterIP' not in self.body['spec'] and 'clusterIP' in parameters: 154 | self.body['spec']['clusterIP'] = parameters['clusterIP'] 155 | 
156 | if self.kind in ['custom_resource_definition']: 157 | return self.api.replace_custom_resource_definition( 158 | self.name, self.body, 159 | ) 160 | 161 | if self.kind in ['service_account']: 162 | return getattr(self.api, 'patch_namespaced_{}'.format(self.kind))( 163 | name=self.name, body=self.body, namespace=self.namespace 164 | ) 165 | 166 | # Use patch() for Secrets with ServiceAccount's token to preserve data fields (ca.crt, token, namespace), 167 | # "kubernetes.io/service-account.uid" annotation and "kubernetes.io/legacy-token-last-used" label 168 | # populated by serviceaccount-token controller. 169 | # 170 | # See for details: 171 | # https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#manually-create-an-api-token-for-a-serviceaccount 172 | if self.kind in ['secret']: 173 | if ('type' in self.body and self.body['type'] == 'kubernetes.io/service-account-token' and 174 | 'annotations' in self.body['metadata'] and 175 | 'kubernetes.io/service-account.name' in self.body['metadata']['annotations']): 176 | 177 | return getattr(self.api, 'patch_namespaced_{}'.format(self.kind))( 178 | name=self.name, body=self.body, namespace=self.namespace 179 | ) 180 | 181 | if hasattr(self.api, "replace_namespaced_{}".format(self.kind)): 182 | return getattr(self.api, 'replace_namespaced_{}'.format(self.kind))( 183 | name=self.name, body=self.body, namespace=self.namespace) 184 | 185 | return getattr(self.api, 'replace_{}'.format(self.kind))( 186 | name=self.name, body=self.body) 187 | except ApiException as e: 188 | if self.kind in ['pod_disruption_budget'] and e.status == 422: 189 | return self.re_create() 190 | log.error('Exception when calling "replace_namespaced_{}": {}'.format(self.kind, add_indent(e.body))) 191 | raise ProvisioningError(e) 192 | 193 | def delete(self): 194 | try: 195 | if hasattr(self.api, "delete_namespaced_{}".format(self.kind)): 196 | return getattr(self.api, 'delete_namespaced_{}'.format(self.kind))( 197 | name=self.name, body=client.V1DeleteOptions(propagation_policy='Foreground'), 198 | namespace=self.namespace) 199 | 200 | return getattr(self.api, 'delete_{}'.format(self.kind))( 201 | name=self.name, body=client.V1DeleteOptions(propagation_policy='Foreground')) 202 | except ApiException as e: 203 | if e.reason == 'Not Found': 204 | return None 205 | log.error('Exception when calling "delete_namespaced_{}": {}'.format(self.kind, add_indent(e.body))) 206 | raise ProvisioningError(e) 207 | 208 | def re_create(self): 209 | log.info('Re-creating {}'.format(self.kind)) 210 | self.body['metadata'].pop('resourceVersion', None) 211 | self.delete() 212 | 213 | for i in range(0, RE_CREATE_TRIES): 214 | if self.get() is not None: 215 | sleep(RE_CREATE_TIMEOUT) 216 | 217 | return self.create() 218 | 219 | 220 | class AdapterCustomKind(Adapter): 221 | def __init__(self, spec, api_custom_objects, api_resources): 222 | super().__init__(spec) 223 | self.api = api_custom_objects 224 | self.api_resources = api_resources 225 | self.plural = None 226 | 227 | try: 228 | api_version_splitted = spec.get('apiVersion').split('/', 1) 229 | self.group = api_version_splitted[0] 230 | self.version = api_version_splitted[1] 231 | except (IndexError, AttributeError): 232 | self.group = None 233 | self.version = None 234 | 235 | resources_list = self.api_resources.list_api_resource_arbitrary(self.group, self.version) 236 | 237 | if not resources_list: 238 | return 239 | 240 | for resource in resources_list.resources: 241 | if resource.kind != self.kind: 242 | 
continue 243 | 244 | self.plural = resource.name 245 | 246 | if not resource.namespaced: 247 | self.namespace = "" 248 | 249 | break 250 | 251 | def get(self): 252 | self._validate() 253 | 254 | try: 255 | if self.namespace: 256 | return self.api.get_namespaced_custom_object( 257 | self.group, self.version, self.namespace, self.plural, self.name 258 | ) 259 | 260 | return self.api.get_cluster_custom_object(self.group, self.version, self.plural, self.name) 261 | 262 | except ApiException as e: 263 | if e.reason == 'Not Found': 264 | return None 265 | 266 | log.error('{}'.format(add_indent(e.body))) 267 | raise ProvisioningError(e) 268 | 269 | def create(self): 270 | self._validate() 271 | 272 | try: 273 | if self.namespace: 274 | return self.api.create_namespaced_custom_object( 275 | self.group, self.version, self.namespace, self.plural, self.body 276 | ) 277 | 278 | return self.api.create_cluster_custom_object(self.group, self.version, self.plural, self.body) 279 | 280 | except ApiException as e: 281 | log.error('{}'.format(add_indent(e.body))) 282 | raise ProvisioningError(e) 283 | 284 | def delete(self): 285 | self._validate() 286 | 287 | try: 288 | if self.namespace: 289 | return self.api.delete_namespaced_custom_object( 290 | self.group, self.version, self.namespace, self.plural, self.name, 291 | body=client.V1DeleteOptions(propagation_policy='Foreground') 292 | ) 293 | 294 | return self.api.delete_cluster_custom_object( 295 | self.group, self.version, self.plural, self.name, 296 | body=client.V1DeleteOptions(propagation_policy='Foreground') 297 | ) 298 | 299 | except ApiException as e: 300 | if e.reason == 'Not Found': 301 | return None 302 | 303 | log.error( 304 | '{}'.format(add_indent(e.body))) 305 | raise ProvisioningError(e) 306 | 307 | def replace(self, parameters): 308 | self._validate() 309 | 310 | if 'resourceVersion' in parameters: 311 | self.body['metadata']['resourceVersion'] = parameters['resourceVersion'] 312 | 313 | try: 314 | if self.namespace: 315 | return self.api.replace_namespaced_custom_object( 316 | self.group, self.version, self.namespace, self.plural, self.name, self.body 317 | ) 318 | 319 | return self.api.replace_cluster_custom_object( 320 | self.group, self.version, self.plural, self.name, self.body 321 | ) 322 | except ApiException as e: 323 | log.error('{}'.format(add_indent(e.body))) 324 | raise ProvisioningError(e) 325 | 326 | def _validate(self): 327 | if not self.plural: 328 | raise RuntimeError("No valid plural name of resource definition discovered") 329 | 330 | if not self.group: 331 | raise RuntimeError("No valid resource definition group discovered") 332 | 333 | if not self.version: 334 | raise RuntimeError("No valid version of resource definition supplied") 335 | -------------------------------------------------------------------------------- /k8s_handle/k8s/api_clients.py: -------------------------------------------------------------------------------- 1 | import re 2 | import logging 3 | 4 | from kubernetes.client.api_client import ApiClient 5 | 6 | from k8s_handle.exceptions import InvalidWarningHeader 7 | 8 | log = logging.getLogger(__name__) 9 | 10 | 11 | class ApiClientWithWarningHandler(ApiClient): 12 | def __init__(self, *args, **kwargs): 13 | self.warning_handler = kwargs.pop("warning_handler", None) 14 | 15 | ApiClient.__init__(self, *args, **kwargs) 16 | 17 | def request(self, *args, **kwargs): 18 | response_data = ApiClient.request(self, *args, **kwargs) 19 | 20 | if self.warning_handler is not None: 21 | headers = 
response_data.getheaders() 22 | 23 | if "Warning" in headers: 24 | self._handle_warnings([headers["Warning"]], self.warning_handler) 25 | 26 | return response_data 27 | 28 | @staticmethod 29 | def _handle_warnings(headers, handler): 30 | try: 31 | warnings = ApiClientWithWarningHandler._parse_warning_headers(headers) 32 | except InvalidWarningHeader as e: 33 | log.debug("Warning headers: {}".format(headers)) 34 | log.error(e) 35 | return 36 | 37 | for warning in warnings: 38 | handler.handle_warning_header(*warning) 39 | 40 | @staticmethod 41 | def _parse_warning_headers(headers): 42 | """ 43 | Based on `ParseWarningHeaders()` from k8s.io/apimachinery/pkg/util/net package. 44 | """ 45 | results = [] 46 | 47 | for header in headers: 48 | while len(header) > 0: 49 | result, remainder = ApiClientWithWarningHandler._parse_warning_header(header) 50 | results += [result] 51 | header = remainder 52 | 53 | return results 54 | 55 | @staticmethod 56 | def _parse_warning_header(header): 57 | """ 58 | Based on `ParseWarningHeader()` from k8s.io/apimachinery/pkg/util/net package, 59 | but with much more permissive validation rules. 60 | """ 61 | 62 | parts = header.split(" ", maxsplit=2) 63 | if len(parts) != 3: 64 | raise InvalidWarningHeader("Invalid warning header: fewer than 3 segments") 65 | 66 | (code, agent, textDateRemainder) = (parts[0], parts[1], parts[2]) 67 | 68 | # verify code format 69 | codeMatcher = re.compile("^[0-9]{3}$") 70 | if not codeMatcher.match(code): 71 | raise InvalidWarningHeader("Invalid warning header: code segment is not 3 digits") 72 | 73 | code = int(code) 74 | 75 | # verify agent presence 76 | if len(agent) == 0: 77 | raise InvalidWarningHeader("Invalid warning header: empty agent segment") 78 | 79 | # verify textDateRemainder presence 80 | if len(textDateRemainder) == 0: 81 | raise InvalidWarningHeader("Invalid warning header: empty text segment") 82 | 83 | # extract text 84 | text, dateAndRemainder = ApiClientWithWarningHandler._parse_quoted_string(textDateRemainder) 85 | 86 | result = (code, agent, text) 87 | remainder = "" 88 | 89 | if len(dateAndRemainder) > 0: 90 | if dateAndRemainder[0] == '"': 91 | # consume date 92 | foundEndQuote = False 93 | for i in range(1, len(dateAndRemainder)): 94 | if dateAndRemainder[i] == '"': 95 | foundEndQuote = True 96 | remainder = dateAndRemainder[i+1:].strip() 97 | break 98 | 99 | if not foundEndQuote: 100 | raise InvalidWarningHeader("Invalid warning header: unterminated date segment") 101 | else: 102 | remainder = dateAndRemainder 103 | 104 | if len(remainder) > 0: 105 | if remainder[0] == ',': 106 | # consume comma if present 107 | remainder = remainder[1:].strip() 108 | else: 109 | raise InvalidWarningHeader("Invalid warning header: unexpected token after warn-date") 110 | 111 | return result, remainder 112 | 113 | @staticmethod 114 | def _parse_quoted_string(quotedString): 115 | """ 116 | Based on `parseQuotedString()` from k8s.io/apimachinery/pkg/util/net package. 
117 | """ 118 | 119 | if len(quotedString) == 0: 120 | raise InvalidWarningHeader("Invalid warning header: invalid quoted string: 0-length") 121 | 122 | if quotedString[0] != '"': 123 | raise InvalidWarningHeader("Invalid warning header: invalid quoted string: missing initial quote") 124 | 125 | quotedString = quotedString[1:] 126 | remainder = "" 127 | escaping = False 128 | closedQuote = False 129 | result = "" 130 | 131 | for i in range(0, len(quotedString)): 132 | b = quotedString[i] 133 | if b == '"': 134 | if escaping: 135 | result += b 136 | escaping = False 137 | else: 138 | closedQuote = True 139 | remainder = quotedString[i+1:].strip() 140 | break 141 | elif b == '\\': 142 | if escaping: 143 | result += b 144 | escaping = False 145 | else: 146 | escaping = True 147 | else: 148 | result += b 149 | escaping = False 150 | 151 | if not closedQuote: 152 | raise InvalidWarningHeader("Invalid warning header: invalid quoted string: missing closing quote") 153 | 154 | return (result, remainder) 155 | -------------------------------------------------------------------------------- /k8s_handle/k8s/api_extensions.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from kubernetes import client 4 | from kubernetes.client.rest import ApiException 5 | 6 | from k8s_handle.exceptions import ProvisioningError 7 | from k8s_handle.transforms import add_indent 8 | 9 | log = logging.getLogger(__name__) 10 | 11 | 12 | class ResourcesAPI(client.ApisApi): 13 | def list_api_resource_arbitrary(self, group, version): 14 | try: 15 | log.debug(f"calling /apis/{group}/{version}") 16 | return self.api_client.call_api( 17 | '/apis/{}/{}'.format(group, version), 'GET', 18 | {}, 19 | [], 20 | { 21 | 'Accept': self.api_client.select_header_accept( 22 | ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'] 23 | ), 24 | 'Content-Type': self.api_client.select_header_content_type( 25 | ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'] 26 | ) 27 | }, 28 | body=None, 29 | post_params=[], 30 | files={}, 31 | response_type='V1APIResourceList', 32 | auth_settings=['BearerToken'], 33 | async_req=None, 34 | _return_http_data_only=True, 35 | _preload_content=True, 36 | _request_timeout=None, 37 | collection_formats={} 38 | ) 39 | except ApiException as e: 40 | if e.reason == 'Not Found': 41 | log.error('The resource definition with the specified group and version was not found') 42 | return None 43 | 44 | log.error('{}'.format(add_indent(e.body))) 45 | raise ProvisioningError(e) 46 | 47 | 48 | class CoreResourcesAPI(client.CoreApi): 49 | 50 | def list_api_resources(self, version): 51 | try: 52 | return self.api_client.call_api( 53 | resource_path='/api/{}'.format(version), 54 | method='GET', 55 | header_params={ 56 | 'Accept': self.api_client.select_header_accept( 57 | ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'] 58 | ), 59 | 'Content-Type': self.api_client.select_header_content_type( 60 | ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'] 61 | ) 62 | }, 63 | response_type='V1APIResourceList', 64 | auth_settings=['BearerToken'], 65 | _return_http_data_only=True, 66 | _preload_content=True, 67 | ) 68 | except ApiException as e: 69 | if e.reason == 'Not Found': 70 | log.error('The resource definition with the specified version was not found') 71 | return None 72 | 73 | log.error('{}'.format(add_indent(e.body))) 74 | raise ProvisioningError(e) 75 | 
-------------------------------------------------------------------------------- /k8s_handle/k8s/diff.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import logging 3 | import copy 4 | from difflib import ndiff 5 | from datetime import datetime 6 | from functools import reduce 7 | import operator 8 | import yaml 9 | from .adapters import Adapter 10 | from k8s_handle.templating import get_template_contexts 11 | log = logging.getLogger(__name__) 12 | 13 | IGNORE_FIELDS = [ 14 | 'metadata.annotations:kubectl.kubernetes.io/last-applied-configuration', 15 | 'metadata.annotations:deployment.kubernetes.io/revision', 16 | 'metadata:creationTimestamp', 17 | 'metadata:resourceVersion', 18 | 'metadata:selfLink', 19 | 'metadata:uid', 20 | 'metadata:namespace', 21 | 'metadata:generation', 22 | 'metadata:managedFields', 23 | 'status' 24 | ] 25 | 26 | 27 | def remove_from_dict(d, path, key): 28 | del reduce(operator.getitem, path, d)[key] 29 | 30 | 31 | def to_dict(obj): 32 | if hasattr(obj, 'attribute_map'): 33 | result = {} 34 | for k, v in getattr(obj, 'attribute_map').items(): 35 | val = getattr(obj, k) 36 | if val is not None: 37 | result[v] = to_dict(val) 38 | return result 39 | elif isinstance(obj, list): 40 | return [to_dict(x) for x in obj] 41 | elif isinstance(obj, datetime): 42 | return str(obj) 43 | elif isinstance(obj, dict): 44 | newobj = copy.deepcopy(obj) 45 | for k, v in obj.items(): 46 | newobj[k] = to_dict(obj[k]) 47 | return newobj 48 | else: 49 | return obj 50 | 51 | 52 | def apply_filter(d, field_path): 53 | try: 54 | path, field = field_path.split(':') 55 | path = path.split('.') 56 | except ValueError: 57 | del d[field_path] 58 | else: 59 | remove_from_dict(d, path, field) 60 | 61 | 62 | class Diff: 63 | @staticmethod 64 | def run(file_path): 65 | for template_body in get_template_contexts(file_path): 66 | if template_body.get('kind') == 'Secret': 67 | log.info(f'Skipping secret {template_body.get("metadata", {}).get("name")}') 68 | continue 69 | kube_client = Adapter.get_instance(template_body) 70 | new = yaml.safe_dump(template_body) 71 | k8s_object = kube_client.get() 72 | if k8s_object is None: 73 | current_dict = {} 74 | else: 75 | current_dict = to_dict(k8s_object) 76 | for field_path in IGNORE_FIELDS: 77 | try: 78 | apply_filter(current_dict, field_path) 79 | except KeyError: 80 | pass 81 | metadata = current_dict.get('metadata', {}) 82 | if 'annotations' in metadata and metadata['annotations'] == {}: 83 | del metadata['annotations'] 84 | current = yaml.safe_dump(current_dict) 85 | if new == current: 86 | log.info(f' Kind: "{template_body.get("kind")}", ' 87 | f'name: "{template_body.get("metadata", {}).get("name")}" : NO CHANGES') 88 | else: 89 | diff = ndiff(current.splitlines(keepends=True), new.splitlines(keepends=True)) 90 | log.info(f' Kind: "{template_body.get("kind")}", ' 91 | f'name: "{template_body.get("metadata", {}).get("name")}"') 92 | sys.stdout.write(''.join(diff)) 93 | -------------------------------------------------------------------------------- /k8s_handle/k8s/fixtures/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: test/test 2 | kind: Deployment 3 | metadata: 4 | name: test2 5 | spec: 6 | replicas: 1 7 | -------------------------------------------------------------------------------- /k8s_handle/k8s/fixtures/deployment_404.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: test/test 
2 | kind: Deployment 3 | metadata: 4 | name: "404" 5 | spec: 6 | replicas: 1 7 | -------------------------------------------------------------------------------- /k8s_handle/k8s/fixtures/deployment_no_api.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: test 2 | kind: Deployment 3 | metadata: 4 | name: ololo 5 | spec: 6 | replicas: 1 7 | -------------------------------------------------------------------------------- /k8s_handle/k8s/fixtures/deployment_wo_replicas.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: test/test 2 | kind: Deployment 3 | metadata: 4 | name: test1 5 | spec: 6 | revisionHistoryLimit: 3 7 | 8 | -------------------------------------------------------------------------------- /k8s_handle/k8s/fixtures/empty.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/2gis/k8s-handle/144fab41f5c59f458333a9e50777641b96b651de/k8s_handle/k8s/fixtures/empty.yaml -------------------------------------------------------------------------------- /k8s_handle/k8s/fixtures/invalid_version.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extra/v1 3 | kind: Service 4 | metadata: 5 | name: my-service 6 | spec: 7 | ports: 8 | - name: HTTP 9 | port: 80 10 | protocol: TCP 11 | targetPort: 80 12 | selector: 13 | app: my-app 14 | type: ClusterIP 15 | -------------------------------------------------------------------------------- /k8s_handle/k8s/fixtures/nokind.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 -------------------------------------------------------------------------------- /k8s_handle/k8s/fixtures/nometadata.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service -------------------------------------------------------------------------------- /k8s_handle/k8s/fixtures/nometadataname.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | lables: 6 | a: 1 -------------------------------------------------------------------------------- /k8s_handle/k8s/fixtures/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: test/test 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: test1 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | resources: 9 | requests: 10 | storage: 1Gi 11 | storageClassName: test 12 | selector: 13 | matchLabels: 14 | volume: test 15 | -------------------------------------------------------------------------------- /k8s_handle/k8s/fixtures/pvc2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: test/test 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: test2 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | resources: 9 | requests: 10 | storage: 1Gi 11 | storageClassName: test 12 | selector: 13 | matchLabels: 14 | volume: test 15 | -------------------------------------------------------------------------------- /k8s_handle/k8s/fixtures/pvc3.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: test/test 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: test2 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | resources: 9 | requests: 
10 | storage: 1Gi 11 | storageClassName: test 12 | volumeMode: Filesystem 13 | lol: lol 14 | selector: 15 | matchLabels: 16 | volume: test 17 | -------------------------------------------------------------------------------- /k8s_handle/k8s/fixtures/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: test/test 2 | kind: Service 3 | metadata: 4 | name: test1 5 | spec: 6 | ports: 7 | - port: 123 8 | -------------------------------------------------------------------------------- /k8s_handle/k8s/fixtures/service_empty_kind.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: test/test 2 | kind: 3 | metadata: 4 | name: ololo 5 | -------------------------------------------------------------------------------- /k8s_handle/k8s/fixtures/service_no_kind.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: test/test 2 | metadata: 3 | name: ololo 4 | -------------------------------------------------------------------------------- /k8s_handle/k8s/fixtures/service_no_ports.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: test/test 2 | kind: Service 3 | metadata: 4 | name: test1 5 | spec: 6 | selector: 7 | app: test1 8 | -------------------------------------------------------------------------------- /k8s_handle/k8s/fixtures/unsupported_version.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Deployment 4 | metadata: 5 | name: my-service 6 | -------------------------------------------------------------------------------- /k8s_handle/k8s/fixtures/valid.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: my-service 6 | spec: 7 | ports: 8 | - name: HTTP 9 | port: 80 10 | protocol: TCP 11 | targetPort: 80 12 | selector: 13 | app: my-app 14 | type: ClusterIP 15 | -------------------------------------------------------------------------------- /k8s_handle/k8s/fixtures/valid_version.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: app/v1 3 | kind: Service 4 | metadata: 5 | name: my-service 6 | spec: 7 | ports: 8 | - name: HTTP 9 | port: 80 10 | protocol: TCP 11 | targetPort: 80 12 | selector: 13 | app: my-app 14 | type: ClusterIP 15 | -------------------------------------------------------------------------------- /k8s_handle/k8s/mocks.py: -------------------------------------------------------------------------------- 1 | from collections import namedtuple 2 | 3 | from kubernetes.client import V1APIResourceList 4 | from kubernetes.client.rest import ApiException 5 | 6 | 7 | class K8sClientMock: 8 | def __init__(self, name=None): 9 | self.name = name 10 | pass 11 | 12 | # Deployment 13 | def read_namespaced_deployment(self, name, namespace): 14 | if self.name == 'fail': 15 | raise ApiException('Get deployment fail') 16 | if self.name == '404' or name == '404': 17 | raise ApiException(reason='Not Found') 18 | 19 | my_response = namedtuple('my_response', 'metadata status spec') 20 | my_status = namedtuple('my_status', 21 | 'replicas available_replicas ready_replicas updated_replicas unavailable_replicas') 22 | my_spec = namedtuple('my_spec', 'replicas') 23 | if self.name == 'test1': 24 | return my_response(metadata={}, spec=my_spec(replicas=3), 25 | 
status=my_status(replicas=3, 26 | available_replicas=2, 27 | ready_replicas=1, 28 | updated_replicas=None, 29 | unavailable_replicas=1)) 30 | if self.name == 'test2' or name == 'test2': 31 | return my_response(metadata={}, spec=my_spec(replicas=1), 32 | status=my_status(replicas=1, 33 | available_replicas=1, 34 | ready_replicas=1, 35 | updated_replicas=1, 36 | unavailable_replicas=None)) 37 | 38 | return my_response(metadata={'key1': 'value1'}, status={'key1': 'value1'}, spec={'key1': 'value1'}) 39 | 40 | def create_namespaced_deployment(self, body, namespace): 41 | if self.name == 'fail': 42 | raise ApiException('Create deployment fail') 43 | 44 | return {'key1': 'value1'} 45 | 46 | def replace_namespaced_deployment(self, name, body, namespace): 47 | if self.name == 'fail': 48 | raise ApiException('Replace deployment fail') 49 | 50 | return {'key1': 'value1'} 51 | 52 | def delete_namespaced_deployment(self, name, body, namespace): 53 | if self.name == 'fail': 54 | raise ApiException('Delete deployment fail') 55 | if self.name == '404' or name == '404': 56 | raise ApiException(reason='Not Found') 57 | 58 | if self.name == 'test1' or name == 'test1': 59 | my_response = namedtuple('my_response', 'message') 60 | return my_response(message='Failed') 61 | 62 | if self.name == 'test2' or name == 'test2': 63 | my_response = namedtuple('my_response', 'message') 64 | return my_response(message=None) 65 | 66 | return {'key1': 'value1'} 67 | 68 | # Service 69 | def read_namespaced_service(self, name, namespace, body=None): 70 | if self.name == 'fail': 71 | raise ApiException('Get service fail') 72 | if self.name == '404': 73 | raise ApiException(reason='Not Found') 74 | 75 | my_response = namedtuple('my_response', 'metadata status spec') 76 | my_status = namedtuple('my_status', 77 | 'replicas available_replicas ready_replicas updated_replicas unavailable_replicas') 78 | my_spec = namedtuple('my_spec', 'ports') 79 | my_port = namedtuple('my_port', 'port name') 80 | 81 | if self.name == 'test1': 82 | return my_response(metadata={}, spec=my_spec(ports=[my_port(port=123, name='test1')]), 83 | status=my_status(replicas=3, 84 | available_replicas=2, 85 | ready_replicas=1, 86 | updated_replicas=None, 87 | unavailable_replicas=1)) 88 | if self.name == 'test2': 89 | return my_response(metadata={}, spec=my_spec(ports=[]), 90 | status=my_status(replicas=1, 91 | available_replicas=1, 92 | ready_replicas=1, 93 | updated_replicas=1, 94 | unavailable_replicas=None)) 95 | 96 | return my_response(metadata={'key1': 'value1'}, status={'key1': 'value1'}, spec={}) 97 | 98 | def replace_namespaced_service(self, name, body, namespace): 99 | if self.name == 'fail': 100 | raise ApiException('Replace service fail') 101 | 102 | return {'key1': 'value1'} 103 | 104 | def delete_namespaced_service(self, name, namespace, body=None): 105 | my_response = namedtuple('my_response', 'message') 106 | return my_response(message='Failed') 107 | 108 | def patch_namespaced_service(self, name, body, namespace): 109 | return {'key1': 'value1'} 110 | 111 | # StatefulSet 112 | def read_namespaced_stateful_set(self, name, namespace): 113 | if self.name == 'fail': 114 | raise ApiException('Get statefulset fail') 115 | if self.name == '404': 116 | raise ApiException(reason='Not Found') 117 | 118 | my_response = namedtuple('my_response', 'metadata status spec') 119 | my_status = namedtuple('my_status', 120 | 'current_replicas current_revision ready_replicas replicas update_revision') 121 | my_spec = namedtuple('my_spec', 'replicas') 122 | 123 | if 
self.name == 'test1': 124 | return my_response(metadata={}, spec=my_spec(replicas=3), 125 | status=my_status(current_replicas=2, 126 | current_revision='revision-123', 127 | ready_replicas=1, 128 | replicas=3, 129 | update_revision='revision-321')) 130 | 131 | if self.name == 'test2': 132 | return my_response(metadata={}, spec=my_spec(replicas=3), 133 | status=my_status(current_replicas=3, 134 | current_revision='revision-123', 135 | ready_replicas=3, 136 | replicas=3, 137 | update_revision='revision-123')) 138 | 139 | return my_response(metadata={'key1': 'value1'}, status={'key1': 'value1'}, spec={'key1': 'value1'}) 140 | 141 | # DaemonSet 142 | def read_namespaced_daemon_set(self, name, namespace): 143 | if self.name == 'fail': 144 | raise ApiException('Get daemonset fail') 145 | if self.name == '404': 146 | raise ApiException(reason='Not Found') 147 | 148 | my_response = namedtuple('my_response', 'metadata status') 149 | my_status = namedtuple('my_status', 'desired_number_scheduled number_available ' 150 | 'number_ready updated_number_scheduled number_unavailable') 151 | 152 | if self.name == 'test1': 153 | return my_response(metadata={}, status=my_status(desired_number_scheduled=2, 154 | number_available=2, 155 | number_ready=1, 156 | updated_number_scheduled=1, 157 | number_unavailable=1)) 158 | if self.name == 'test2': 159 | return my_response(metadata={}, status=my_status(desired_number_scheduled=2, 160 | number_available=2, 161 | number_ready=2, 162 | updated_number_scheduled=2, 163 | number_unavailable=None)) 164 | 165 | return my_response(metadata={'key1': 'value1'}, status={'key1': 'value1'}) 166 | 167 | # Job 168 | def read_namespaced_job(self, name, namespace): 169 | if self.name == 'fail': 170 | raise ApiException('Get daemonset fail') 171 | if self.name == '404': 172 | raise ApiException(reason='Not Found') 173 | 174 | my_response = namedtuple('my_response', 'metadata status') 175 | my_status = namedtuple('my_status', 'failed conditions') 176 | 177 | if self.name == 'test1': 178 | return my_response(metadata={}, status=my_status(failed='Failed', 179 | conditions=[])) 180 | if self.name == 'test2': 181 | my_conditions = namedtuple('my_conditions', 'type') 182 | return my_response(metadata={}, status=my_status(failed=None, 183 | conditions=[my_conditions(type='Failed')])) 184 | if self.name == 'test3': 185 | my_conditions = namedtuple('my_conditions', 'type') 186 | return my_response(metadata={}, status=my_status(failed=None, 187 | conditions=[my_conditions(type='Complete')])) 188 | 189 | return my_response(metadata={'key1': 'value1'}, status={'key1': 'value1'}) 190 | 191 | # StorageClass 192 | def read_storage_class(self, name): 193 | if self.name == 'fail': 194 | raise ApiException('Get storage class fail') 195 | if self.name == '404' or name == '404': 196 | raise ApiException(reason='Not Found') 197 | 198 | my_response = namedtuple('my_response', 'metadata status') 199 | my_status = namedtuple('my_status', 200 | 'replicas available_replicas ready_replicas updated_replicas unavailable_replicas') 201 | 202 | if self.name == 'test1': 203 | return my_response(metadata={}, status=my_status(replicas=3, 204 | available_replicas=2, 205 | ready_replicas=1, 206 | updated_replicas=None, 207 | unavailable_replicas=1)) 208 | if self.name == 'test2' or name == 'test2': 209 | return my_response(metadata={}, status=my_status(replicas=1, 210 | available_replicas=1, 211 | ready_replicas=1, 212 | updated_replicas=1, 213 | unavailable_replicas=None)) 214 | 215 | return 
my_response(metadata={'key1': 'value1'}, status={'key1': 'value1'}) 216 | 217 | def create_storage_class(self, body): 218 | if self.name == 'fail': 219 | raise ApiException('Create storage class fail') 220 | 221 | return {'key1': 'value1'} 222 | 223 | def replace_storage_class(self, name, body): 224 | if self.name == 'fail': 225 | raise ApiException('Replace storage class fail') 226 | 227 | return {'key1': 'value1'} 228 | 229 | def delete_storage_class(self, name, body): 230 | if self.name == 'fail': 231 | raise ApiException('Delete storage class fail') 232 | if self.name == '404' or name == '404': 233 | raise ApiException(reason='Not Found') 234 | 235 | if self.name == 'test1' or name == 'test1': 236 | my_response = namedtuple('my_response', 'message') 237 | return my_response(message='Failed') 238 | 239 | if self.name == 'test2' or name == 'test2': 240 | my_response = namedtuple('my_response', 'message') 241 | return my_response(message=None) 242 | 243 | return {'key1': 'value1'} 244 | 245 | # PersistentVolumeClaim 246 | def read_namespaced_persistent_volume_claim(self, name, namespace): 247 | my_response = namedtuple('my_response', 'spec metadata') 248 | my_spec = namedtuple('my_spec', 249 | 'access_modes resources selector storage_class_name') 250 | 251 | if self.name == 'test1' or name == 'test1': 252 | return my_response(metadata={}, spec=my_spec(access_modes=['ReadWriteOnce'], 253 | resources={'requests': {'storage': '1Gi'}}, 254 | storage_class_name='test', 255 | selector={'matchLabels': {'volume': 'test'}})) 256 | 257 | if self.name == 'test2' or name == 'test2': 258 | return my_response(metadata={}, spec=my_spec(access_modes=['ReadWriteOnce'], 259 | resources={'requests': {'storage': '2Gi'}}, 260 | storage_class_name='test', 261 | selector={'matchLabels': {'volume': 'test'}})) 262 | 263 | return my_response(metadata={'key1': 'value1'}, spec={'key1': 'value1'}) 264 | 265 | def replace_namespaced_persistent_volume_claim(self, name, body, namespace): 266 | if self.name == 'test2' or name == 'test2': 267 | raise ApiException('Replace persistent volume claim fail') 268 | 269 | 270 | class CustomObjectsAPIMock: 271 | pass 272 | 273 | 274 | class ResourcesAPIMock: 275 | def __init__(self, api_version=None, group_version=None, resources=None): 276 | self._resources = resources 277 | self._api_version = api_version 278 | self._group_version = group_version 279 | self._kind = 'APIResourceList' 280 | 281 | def list_api_resource_arbitrary(self, group, version): 282 | if not self._resources or self._group_version != '{}/{}'.format(group, version): 283 | return None 284 | 285 | return V1APIResourceList(self._api_version, self._group_version, self._kind, self._resources) 286 | 287 | def list_api_resources(self, version): 288 | if not self._resources or self._group_version != version: 289 | return None 290 | 291 | return V1APIResourceList(self._api_version, self._group_version, self._kind, self._resources) 292 | -------------------------------------------------------------------------------- /k8s_handle/k8s/provisioner.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from time import sleep 3 | 4 | from kubernetes.client.models.v1_label_selector import V1LabelSelector 5 | from kubernetes.client.models.v1_label_selector_requirement import V1LabelSelectorRequirement 6 | from kubernetes.client.models.v1_resource_requirements import V1ResourceRequirements 7 | 8 | from k8s_handle import settings 9 | from k8s_handle.templating import 
get_template_contexts 10 | from k8s_handle.transforms import split_str_by_capital_letters 11 | from .adapters import Adapter 12 | from .warning_handler import WarningHandler 13 | 14 | log = logging.getLogger(__name__) 15 | 16 | 17 | class Provisioner: 18 | def __init__(self, command, sync_mode, show_logs): 19 | self.command = command 20 | self.sync_mode = False if show_logs else sync_mode 21 | self.show_logs = show_logs 22 | self._warning_handler = WarningHandler() 23 | 24 | @staticmethod 25 | def _replicas_count_are_equal(replicas): 26 | replicas = [0 if r is None else r for r in replicas] # replace all None to 0 27 | return all(r == replicas[0] for r in replicas) 28 | 29 | @staticmethod 30 | def _is_job_complete(status): 31 | if status.failed is not None: 32 | raise RuntimeError('Job running failed') 33 | 34 | if status.conditions is not None: 35 | for condition in status.conditions: 36 | if condition.type == 'Complete': 37 | return True 38 | else: 39 | return False 40 | 41 | def run(self, file_path): 42 | if self.command == 'deploy': 43 | self._deploy_all(file_path) 44 | if self.command == 'destroy': 45 | self._destroy_all(file_path) 46 | 47 | def _is_pvc_specs_equals(self, old_obj, new_dict): 48 | for new_key in new_dict.keys(): 49 | old_key = split_str_by_capital_letters(new_key) 50 | 51 | # if template has some new attributes 52 | if not hasattr(old_obj, old_key): 53 | return False 54 | old_value = getattr(old_obj, old_key) 55 | 56 | if isinstance(old_value, list) and \ 57 | isinstance(old_value[0], V1LabelSelectorRequirement): 58 | if len(old_value) != len(new_dict[new_key]): 59 | return False 60 | for i in range(0, len(old_value)): 61 | if not self._is_pvc_specs_equals(old_value[i], new_dict[new_key][i]): 62 | return False 63 | 64 | elif isinstance(old_value, (V1ResourceRequirements, V1LabelSelector)): 65 | if not self._is_pvc_specs_equals(old_value, new_dict[new_key]): 66 | return False 67 | 68 | elif old_value != new_dict[new_key]: 69 | log.error('{} != {}'.format(old_value, new_dict[new_key])) 70 | return False 71 | 72 | return True 73 | 74 | def _deploy_all(self, file_path): 75 | for template_body in get_template_contexts(file_path): 76 | self._deploy(template_body, file_path) 77 | 78 | def _deploy(self, template_body, file_path): 79 | kube_client = Adapter.get_instance(template_body, warning_handler=self._warning_handler) 80 | 81 | if not kube_client: 82 | raise RuntimeError( 83 | 'Unknown apiVersion "{}" in template "{}"'.format( 84 | template_body['apiVersion'], 85 | file_path 86 | ) 87 | ) 88 | 89 | log.info('Using namespace "{}"'.format(kube_client.namespace)) 90 | resource = kube_client.get() 91 | 92 | if resource is None: 93 | log.info('{} "{}" does not exist, create it'.format(template_body['kind'], kube_client.name)) 94 | kube_client.create() 95 | else: 96 | log.info('{} "{}" already exists, replace it'.format(template_body['kind'], kube_client.name)) 97 | parameters = {} 98 | 99 | if hasattr(resource, 'metadata'): 100 | if hasattr(resource.metadata, 'resource_version'): 101 | parameters['resourceVersion'] = resource.metadata.resource_version 102 | elif 'metadata' in resource: 103 | if 'resourceVersion' in resource['metadata']: 104 | parameters['resourceVersion'] = resource['metadata']['resourceVersion'] 105 | 106 | if template_body['kind'] == 'Service': 107 | if hasattr(resource.spec, 'cluster_ip'): 108 | parameters['clusterIP'] = resource.spec.cluster_ip 109 | 110 | if template_body['kind'] == 'PersistentVolumeClaim': 111 | if self._is_pvc_specs_equals(resource.spec, 
template_body['spec']): 112 | log.info('PersistentVolumeClaim is not changed') 113 | return 114 | 115 | if template_body['kind'] == 'PersistentVolume': 116 | if resource.status.phase in ['Bound', 'Released']: 117 | log.warning('PersistentVolume has "{}" status, skip replacing'.format(resource.status.phase)) 118 | return 119 | 120 | kube_client.replace(parameters) 121 | 122 | if self.sync_mode: 123 | if template_body['kind'] == 'Deployment': 124 | self._wait_deployment_complete(kube_client, 125 | tries=settings.CHECK_STATUS_TRIES, 126 | timeout=settings.CHECK_STATUS_TIMEOUT) 127 | 128 | if template_body['kind'] == 'StatefulSet': 129 | self._wait_statefulset_complete(kube_client, 130 | tries=settings.CHECK_STATUS_TRIES, 131 | timeout=settings.CHECK_STATUS_TIMEOUT) 132 | 133 | # INFO: vadim.reyder Since Kubernetes version 1.6 all DaemonSets by default have 134 | # `updateStrategy.type`=`RollingUpdate`, so we wait for deploy only if `updateStrategy.type` != 'OnDelete'. 135 | # WARNING: We consciously skip case with kubernetes version < 1.6, due to it's very old. 136 | if template_body['kind'] == 'DaemonSet' and \ 137 | template_body.get('spec').get('updateStrategy', {}).get('type') != 'OnDelete': 138 | self._wait_daemonset_complete(kube_client, 139 | tries=settings.CHECK_STATUS_TRIES, 140 | timeout=settings.CHECK_STATUS_TIMEOUT) 141 | 142 | if template_body['kind'] == 'Job': 143 | return self._wait_job_complete(kube_client, 144 | tries=settings.CHECK_STATUS_TRIES, 145 | timeout=settings.CHECK_STATUS_TIMEOUT) 146 | 147 | if template_body['kind'] == 'Job' and self.show_logs: 148 | pod_name, pod_containers = self._get_pod_name_and_containers_by_selector( 149 | kube_client, 150 | template_body['metadata']['name'], 151 | tries=settings.CHECK_STATUS_TRIES, 152 | timeout=settings.CHECK_STATUS_TIMEOUT) 153 | 154 | log.info("Got pod name and pod containers {} {}".format(pod_name, pod_containers)) 155 | 156 | if not pod_name: 157 | log.warning('Pod not found for showing logs') 158 | return 159 | 160 | is_successful = self._wait_pod_running( 161 | kube_client, 162 | pod_name, 163 | tries=settings.CHECK_STATUS_TRIES, 164 | timeout=settings.CHECK_STATUS_TIMEOUT) 165 | 166 | for pod_container in pod_containers: 167 | log.info('\n{}'.format(kube_client.read_pod_logs(pod_name, pod_container))) 168 | 169 | if not is_successful: 170 | raise RuntimeError('Job running failed') 171 | 172 | def _destroy_all(self, file_path): 173 | for template_body in get_template_contexts(file_path): 174 | self._destroy(template_body, file_path) 175 | 176 | def _destroy(self, template_body, file_path): 177 | kube_client = Adapter.get_instance(template_body, warning_handler=self._warning_handler) 178 | 179 | if not kube_client: 180 | raise RuntimeError( 181 | 'Unknown apiVersion "{}" in template "{}"'.format( 182 | template_body['apiVersion'], 183 | file_path 184 | ) 185 | ) 186 | 187 | log.info('Using namespace "{}"'.format(kube_client.namespace)) 188 | log.info('Trying to delete {} "{}"'.format(template_body['kind'], kube_client.name)) 189 | response = kube_client.delete() 190 | 191 | if response is None: 192 | log.info("{} {} is not found".format(template_body['kind'], kube_client.name)) 193 | return 194 | 195 | # custom objects api response is a simple dictionary without message field 196 | if hasattr(response, 'message') and response.message is not None: 197 | raise RuntimeError('{} "{}" deletion failed: {}'.format( 198 | template_body['kind'], kube_client.name, response.message)) 199 | 200 | if isinstance(response, dict) and 
not response.get('metadata', {}).get('deletionTimestamp'): 201 | raise RuntimeError('{} "{}" deletion failed: {}'.format(template_body['kind'], kube_client.name, response)) 202 | 203 | if self.sync_mode: 204 | self._wait_destruction_complete(kube_client, template_body['kind'], 205 | tries=settings.CHECK_STATUS_TRIES, 206 | timeout=settings.CHECK_STATUS_TIMEOUT) 207 | 208 | log.info('{} "{}" has been deleted'.format(template_body['kind'], kube_client.name)) 209 | 210 | @staticmethod 211 | def _get_pod_name_and_containers_by_selector(kube_client, selector, tries, timeout): 212 | for i in range(0, tries): 213 | pod = kube_client.get_pods_by_selector(selector) 214 | 215 | if len(pod.items) == 1: 216 | log.info('Found pod "{}"'.format(pod.items[0].metadata.name)) 217 | containers = [container.name for container in pod.items[0].spec.containers] 218 | return pod.items[0].metadata.name, containers 219 | else: 220 | if len(pod.items) == 0: 221 | log.warning('No pods found by job-name={}, next attempt in {} sec.'.format(selector, timeout)) 222 | else: 223 | names = [pod.metadata.name for pod in pod.items] 224 | log.warning('More than one pod found by job-name={}: {}, ' 225 | 'next attempt in {} sec.'.format(selector, names, timeout)) 226 | sleep(timeout) 227 | 228 | log.error('Problems with getting pod by selector job-name={} for {} tries'.format(selector, tries)) 229 | return '', [] 230 | 231 | def _wait_deployment_complete(self, kube_client, tries, timeout): 232 | for i in range(0, tries): 233 | sleep(timeout) 234 | deployment = kube_client.get() 235 | status = deployment.status 236 | 237 | replicas = [deployment.spec.replicas, status.replicas, status.available_replicas, 238 | status.ready_replicas, status.updated_replicas] 239 | 240 | log.info('desiredReplicas = {}, updatedReplicas = {}, availableReplicas = {}'. 241 | format(replicas[0], replicas[4], replicas[2])) 242 | if self._replicas_count_are_equal(replicas) and status.unavailable_replicas is None: 243 | log.info('Deployment completed on {} attempt'.format(i + 1)) 244 | return 245 | else: 246 | log.info('Deployment not completed on {} attempt, next attempt in {} sec.'.format(i + 1, timeout)) 247 | 248 | raise RuntimeError('Deployment not completed for {} tries'.format(tries)) 249 | 250 | def _wait_statefulset_complete(self, kube_client, tries, timeout): 251 | for i in range(0, tries): 252 | sleep(timeout) 253 | statefulset = kube_client.get() 254 | status = statefulset.status 255 | 256 | current_revision = status.current_revision 257 | update_revision = status.update_revision 258 | replicas = [statefulset.spec.replicas, status.current_replicas, status.ready_replicas] 259 | 260 | log.info('Current revision {}, should be {}'.format(current_revision, update_revision)) 261 | if current_revision == update_revision: 262 | log.info('desiredReplicas = {}, updatedReplicas = {}, availableReplicas = {}'. 
263 | format(replicas[0], replicas[1], replicas[2])) 264 | if self._replicas_count_are_equal(replicas): 265 | log.info('StatefulSet completed on {} attempt'.format(i)) 266 | return 267 | else: 268 | log.info('StatefulSet not completed on {} attempt, next attempt in {} sec.'.format(i, timeout)) 269 | 270 | raise RuntimeError('StatefulSet not completed for {} tries'.format(tries)) 271 | 272 | def _wait_daemonset_complete(self, kube_client, tries, timeout): 273 | for i in range(0, tries): 274 | sleep(timeout) 275 | status = kube_client.get().status 276 | 277 | replicas = [status.desired_number_scheduled, status.number_available, 278 | status.number_ready, status.updated_number_scheduled] 279 | log.info('desiredNodes = {}, availableNodes = {}, readyNodes = {}, updatedNodes = {}'. 280 | format(replicas[0], replicas[1], replicas[2], replicas[3])) 281 | if self._replicas_count_are_equal(replicas) and status.number_unavailable is None: 282 | log.info('DaemonSet completed on {} attempt'.format(i)) 283 | return 284 | else: 285 | log.info('DaemonSet not completed on {} attempt, next attempt in {} sec.'.format(i, timeout)) 286 | 287 | raise RuntimeError('DaemonSet not completed for {} tries'.format(tries)) 288 | 289 | def _wait_job_complete(self, kube_client, tries, timeout): 290 | for i in range(0, tries): 291 | sleep(timeout) 292 | status = kube_client.get().status 293 | if self._is_job_complete(status): 294 | log.info('Job completed on {} attempt'.format(i)) 295 | return 296 | else: 297 | log.info('Job not completed on {} attempt, next attempt in {} sec.'.format(i, timeout)) 298 | 299 | raise RuntimeError('Job not completed for {} tries'.format(tries)) 300 | 301 | @staticmethod 302 | def _wait_pod_running(kube_client, pod_name, tries, timeout): 303 | for i in range(0, tries): 304 | status = kube_client.read_pod_status(pod_name) 305 | 306 | log.info('Pod "{}" status: {}'.format(pod_name, status.status.phase)) 307 | if status.status.phase == 'Succeeded': 308 | return True 309 | if status.status.phase in ['Failed', 'Unknown']: 310 | return False 311 | sleep(timeout) 312 | 313 | raise RuntimeError('Pod "{}" not completed for {} tries'.format(pod_name, tries)) 314 | 315 | @staticmethod 316 | def _wait_destruction_complete(kube_client, kind, tries, timeout): 317 | for i in range(0, tries): 318 | sleep(timeout) 319 | if kube_client.get() is None: 320 | log.info('{} destruction completed on {} attempt'.format(kind, i + 1)) 321 | return 322 | else: 323 | log.info('{} destruction not completed on {} attempt, ' 324 | 'next attempt in {} sec.'.format(kind, i + 1, timeout)) 325 | 326 | raise RuntimeError('{} destruction not completed for {} tries'.format(kind, tries)) 327 | -------------------------------------------------------------------------------- /k8s_handle/k8s/test_adapters.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from kubernetes.client import V1APIResource 4 | 5 | from k8s_handle.exceptions import ProvisioningError 6 | from k8s_handle.transforms import split_str_by_capital_letters 7 | from .adapters import Adapter, AdapterBuiltinKind, AdapterCustomKind 8 | from .mocks import K8sClientMock, CustomObjectsAPIMock, ResourcesAPIMock 9 | 10 | 11 | class TestAdapterBuiltInKind(unittest.TestCase): 12 | def test_get_app_kind(self): 13 | self.assertEqual(split_str_by_capital_letters('ConfigMap'), 'config_map') 14 | self.assertEqual(split_str_by_capital_letters('Namespace'), 'namespace') 15 | 
self.assertEqual(split_str_by_capital_letters('PodDisruptionBudget'), 'pod_disruption_budget') 16 | 17 | def test_app_get_fail(self): 18 | deployment = AdapterBuiltinKind( 19 | api=K8sClientMock('fail'), 20 | spec={'kind': 'Deployment', 'metadata': {'name': 'fail'}, 'spec': {'replicas': 1}}) 21 | with self.assertRaises(ProvisioningError) as context: 22 | deployment.get() 23 | self.assertTrue('Get deployment fail' in str(context.exception)) 24 | 25 | storage = AdapterBuiltinKind( 26 | api=K8sClientMock('fail'), 27 | spec={'kind': 'StorageClass', 'metadata': {'name': 'fail'}, 'spec': {'replicas': 1}}) 28 | with self.assertRaises(ProvisioningError) as context: 29 | storage.get() 30 | self.assertTrue('Get storage class fail' in str(context.exception)) 31 | 32 | def test_app_get_not_found(self): 33 | deployment = AdapterBuiltinKind( 34 | api=K8sClientMock('404'), 35 | spec={'kind': 'Deployment', 'metadata': {'name': '404'}, 'spec': {'replicas': 1}}) 36 | res = deployment.get() 37 | self.assertEqual(res, None) 38 | 39 | storage = AdapterBuiltinKind( 40 | api=K8sClientMock('404'), 41 | spec={'kind': 'StorageClass', 'metadata': {'name': '404'}, 'spec': {'replicas': 1}}) 42 | res = storage.get() 43 | self.assertEqual(res, None) 44 | 45 | def test_app_get(self): 46 | deployment = AdapterBuiltinKind( 47 | api=K8sClientMock(), 48 | spec={'kind': 'Deployment', 'metadata': {'name': 'test'}, 'spec': {'replicas': 1}}) 49 | res = deployment.get() 50 | 51 | self.assertEqual(res.metadata, {'key1': 'value1'}) 52 | 53 | storage = AdapterBuiltinKind( 54 | api=K8sClientMock(), 55 | spec={'kind': 'StorageClass', 'metadata': {'name': 'test'}, 'spec': {'replicas': 1}}) 56 | res = storage.get() 57 | 58 | self.assertEqual(res.metadata, {'key1': 'value1'}) 59 | 60 | def test_app_create_fail(self): 61 | deployment = AdapterBuiltinKind( 62 | api=K8sClientMock('fail'), 63 | spec={'kind': 'Deployment', 'metadata': {'name': ''}, 'spec': {'replicas': 1}}) 64 | with self.assertRaises(ProvisioningError) as context: 65 | deployment.create() 66 | self.assertTrue('Create deployment fail' in str(context.exception)) 67 | 68 | storage = AdapterBuiltinKind( 69 | api=K8sClientMock('fail'), 70 | spec={'kind': 'StorageClass', 'metadata': {'name': ''}, 'spec': {'replicas': 1}}) 71 | with self.assertRaises(ProvisioningError) as context: 72 | storage.create() 73 | self.assertTrue('Create storage class fail' in str(context.exception)) 74 | 75 | def test_app_create(self): 76 | deployment = AdapterBuiltinKind( 77 | api=K8sClientMock(''), 78 | spec={'kind': 'Deployment', 'metadata': {'name': ''}, 'spec': {'replicas': 1}}) 79 | res = deployment.create() 80 | self.assertEqual(res, {'key1': 'value1'}) 81 | 82 | storage = AdapterBuiltinKind( 83 | api=K8sClientMock(''), 84 | spec={'kind': 'StorageClass', 'metadata': {'name': ''}, 'spec': {'replicas': 1}}) 85 | res = storage.create() 86 | self.assertEqual(res, {'key1': 'value1'}) 87 | 88 | def test_app_replace_fail(self): 89 | deployment = AdapterBuiltinKind( 90 | api=K8sClientMock('fail'), 91 | spec={'kind': 'Deployment', 'metadata': {'name': 'fail'}, 'spec': {'replicas': 1}}) 92 | with self.assertRaises(ProvisioningError) as context: 93 | deployment.replace({}) 94 | self.assertTrue('Replace deployment fail' in str(context.exception)) 95 | 96 | storage = AdapterBuiltinKind( 97 | api=K8sClientMock('fail'), 98 | spec={'kind': 'StorageClass', 'metadata': {'name': 'fail'}, 'spec': {'replicas': 1}}) 99 | with self.assertRaises(ProvisioningError) as context: 100 | storage.replace({}) 101 | 
self.assertTrue('Replace storage class fail' in str(context.exception)) 102 | 103 | def test_app_replace(self): 104 | deployment = AdapterBuiltinKind( 105 | api=K8sClientMock(''), 106 | spec={'kind': 'Deployment', 'metadata': {'name': ''}, 'spec': {'replicas': 1}}) 107 | res = deployment.replace({}) 108 | self.assertEqual(res, {'key1': 'value1'}) 109 | 110 | storage = AdapterBuiltinKind( 111 | api=K8sClientMock(''), 112 | spec={'kind': 'StorageClass', 'metadata': {'name': ''}, 'spec': {'replicas': 1}}) 113 | res = storage.replace({}) 114 | self.assertEqual(res, {'key1': 'value1'}) 115 | 116 | def test_app_replace_service(self): 117 | service = AdapterBuiltinKind( 118 | api=K8sClientMock(''), 119 | spec={'kind': 'Service', 'metadata': {'name': ''}, 'spec': {'type': 'ClusterIP'}}) 120 | res = service.replace({}) 121 | self.assertEqual(res, {'key1': 'value1'}) 122 | 123 | def test_app_delete_fail(self): 124 | client = AdapterBuiltinKind( 125 | api=K8sClientMock('fail'), 126 | spec={'kind': 'Deployment', 'metadata': {'name': 'fail'}, 'spec': {'replicas': 1}}) 127 | with self.assertRaises(ProvisioningError) as context: 128 | client.delete() 129 | self.assertTrue('Delete deployment fail' in str(context.exception)) 130 | 131 | storage = AdapterBuiltinKind( 132 | api=K8sClientMock('fail'), 133 | spec={'kind': 'StorageClass', 'metadata': {'name': 'fail'}, 'spec': {'replicas': 1}}) 134 | with self.assertRaises(ProvisioningError) as context: 135 | storage.delete() 136 | self.assertTrue('Delete storage class fail' in str(context.exception)) 137 | 138 | def test_app_delete_not_found(self): 139 | client = AdapterBuiltinKind( 140 | api=K8sClientMock('404'), 141 | spec={'kind': 'Deployment', 'metadata': {'name': '404'}, 'spec': {'replicas': 1}}) 142 | res = client.delete() 143 | self.assertEqual(res, None) 144 | 145 | storage = AdapterBuiltinKind( 146 | api=K8sClientMock('404'), 147 | spec={'kind': 'StorageClass', 'metadata': {'name': '404'}, 'spec': {'replicas': 1}}) 148 | res = storage.delete() 149 | self.assertEqual(res, None) 150 | 151 | def test_app_delete(self): 152 | client = AdapterBuiltinKind( 153 | api=K8sClientMock(), 154 | spec={'kind': 'Deployment', 'metadata': {'name': 'test'}, 'spec': {'replicas': 1}}) 155 | res = client.delete() 156 | 157 | self.assertEqual(res, {'key1': 'value1'}) 158 | 159 | storage = AdapterBuiltinKind( 160 | api=K8sClientMock(), 161 | spec={'kind': 'StorageClass', 'metadata': {'name': 'test'}, 'spec': {'replicas': 1}}) 162 | res = storage.delete() 163 | 164 | self.assertEqual(res, {'key1': 'value1'}) 165 | 166 | 167 | class TestAdapter(unittest.TestCase): 168 | def test_get_instance_custom(self): 169 | self.assertIsInstance( 170 | Adapter.get_instance({'kind': "CustomKind"}, CustomObjectsAPIMock(), ResourcesAPIMock()), 171 | AdapterCustomKind 172 | ) 173 | self.assertIsInstance( 174 | Adapter.get_instance({'kind': "CustomKind"}, CustomObjectsAPIMock(), ResourcesAPIMock()), 175 | AdapterCustomKind 176 | ) 177 | 178 | def test_get_instance_test(self): 179 | self.assertIsInstance( 180 | Adapter.get_instance( 181 | { 182 | 'kind': Adapter.kinds_builtin[0], 183 | 'apiVersion': 'test/test' 184 | } 185 | ).api, K8sClientMock) 186 | 187 | def test_get_instance_builtin(self): 188 | self.assertIsInstance( 189 | Adapter.get_instance( 190 | { 191 | 'kind': Adapter.kinds_builtin[0], 192 | 'apiVersion': 'apps/v1' 193 | } 194 | ), AdapterBuiltinKind) 195 | 196 | def test_get_instance_negative(self): 197 | self.assertIsNone( 198 | Adapter.get_instance( 199 | { 200 | 'kind': 
Adapter.kinds_builtin[0], 201 | 'apiVersion': 'unknown' 202 | } 203 | ) 204 | ) 205 | 206 | 207 | class TestAdapterCustomKind(unittest.TestCase): 208 | @staticmethod 209 | def _resources_api_mock(): 210 | return ResourcesAPIMock( 211 | 'version', 212 | 'group/version', 213 | [V1APIResource(None, 'group', 'kind', 'kinds', True, [], 'kind', [], verbs=[])] 214 | ) 215 | 216 | def test_initialization_positive(self): 217 | adapter = Adapter.get_instance( 218 | { 219 | 'kind': 'kind', 220 | 'apiVersion': 'group/version', 221 | 'metadata': { 222 | "namespace": 'test_namespace' 223 | } 224 | }, CustomObjectsAPIMock(), TestAdapterCustomKind._resources_api_mock() 225 | ) 226 | self.assertEqual(adapter.kind, 'kind') 227 | self.assertEqual(adapter.namespace, 'test_namespace') 228 | self.assertEqual(adapter.group, 'group') 229 | self.assertEqual(adapter.version, 'version') 230 | self.assertEqual(adapter.plural, 'kinds') 231 | self.assertIsInstance(adapter.api, CustomObjectsAPIMock) 232 | 233 | def test_initialization_kind_missing(self): 234 | adapter = Adapter.get_instance({}, CustomObjectsAPIMock(), TestAdapterCustomKind._resources_api_mock()) 235 | self.assertFalse(adapter.kind) 236 | self.assertFalse(adapter.plural) 237 | 238 | def test_initialization_api_version_invalid(self): 239 | adapter = Adapter.get_instance({}, CustomObjectsAPIMock(), TestAdapterCustomKind._resources_api_mock()) 240 | self.assertFalse(adapter.group) 241 | self.assertFalse(adapter.version) 242 | 243 | adapter = Adapter.get_instance( 244 | {'apiVersion': 'noslash'}, 245 | CustomObjectsAPIMock(), 246 | TestAdapterCustomKind._resources_api_mock() 247 | ) 248 | self.assertFalse(adapter.group) 249 | self.assertFalse(adapter.version) 250 | 251 | adapter = Adapter.get_instance( 252 | {'apiVersion': 'domain/version/something'}, 253 | CustomObjectsAPIMock(), 254 | ResourcesAPIMock() 255 | ) 256 | self.assertEqual(adapter.group, 'domain') 257 | self.assertEqual(adapter.version, 'version/something') 258 | -------------------------------------------------------------------------------- /k8s_handle/k8s/test_api_clients.py: -------------------------------------------------------------------------------- 1 | import types 2 | import unittest 3 | from unittest.mock import Mock 4 | from unittest.mock import call 5 | from unittest.mock import patch 6 | 7 | from urllib3 import HTTPResponse 8 | from kubernetes.client.rest import RESTResponse 9 | 10 | from k8s_handle.exceptions import InvalidWarningHeader 11 | from .api_clients import ApiClientWithWarningHandler 12 | 13 | 14 | class TestApiClientWithWarningHandler(unittest.TestCase): 15 | def setUp(self): 16 | self.warning_handler = types.SimpleNamespace() 17 | self.warning_handler.handle_warning_header = Mock() 18 | self.api_client = ApiClientWithWarningHandler(warning_handler=self.warning_handler) 19 | 20 | @patch('kubernetes.client.api_client.ApiClient.request') 21 | def _test_request(self, headers, mocked_request): 22 | mocked_request.return_value = RESTResponse(HTTPResponse(headers=headers)) 23 | self.api_client.request() 24 | return self.api_client.warning_handler.handle_warning_header 25 | 26 | def test_request(self): 27 | handler = self._test_request([ 28 | ('Warning', '299 - "warning 1"'), 29 | ]) 30 | handler.assert_called_with(299, '-', 'warning 1') 31 | 32 | def test_request_with_multiple_headers(self): 33 | handler = self._test_request([ 34 | ('Warning', '299 - "warning 1"'), 35 | ('Warning', '299 - "warning 2", 299 - "warning 3"'), 36 | ]) 37 | handler.assert_has_calls([ 38 | 
call(299, '-', 'warning 1'), 39 | call(299, '-', 'warning 2'), 40 | call(299, '-', 'warning 3'), 41 | ]) 42 | 43 | def test_request_without_header(self): 44 | headers = [] 45 | self._test_request(headers).assert_not_called() 46 | 47 | def test_request_with_invalid_headers(self): 48 | with self.assertLogs("k8s_handle.k8s.api_clients", level="ERROR"): 49 | self._test_request([ 50 | ('Warning', 'invalid'), 51 | ]) 52 | 53 | def test_parse_warning_headers(self): 54 | self.assertEqual( 55 | self.api_client._parse_warning_headers( 56 | ['299 - "warning 1"'], 57 | ), 58 | [(299, '-', 'warning 1')], 59 | ) 60 | 61 | def test_parse_warning_headers_with_invalid_header(self): 62 | with self.assertRaisesRegex(InvalidWarningHeader, "Invalid warning header: fewer than 3 segments"): 63 | self.api_client._parse_warning_headers(['invalid']) 64 | 65 | def test_parse_warning_headers_with_invalid_code(self): 66 | with self.assertRaisesRegex(InvalidWarningHeader, "Invalid warning header: code segment is not 3 digits"): 67 | self.api_client._parse_warning_headers(['1000 - "warning 3"']) 68 | 69 | def test_parse_warning_headers_with_unquoted_text(self): 70 | with self.assertRaisesRegex( 71 | InvalidWarningHeader, 72 | "Invalid warning header: invalid quoted string: missing closing quote" 73 | ): 74 | self.api_client._parse_warning_headers(['299 - "warning unquoted']) 75 | -------------------------------------------------------------------------------- /k8s_handle/k8s/test_provisioner.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from k8s_handle import settings 4 | from k8s_handle.exceptions import ProvisioningError 5 | from k8s_handle.templating import get_template_contexts 6 | from .adapters import AdapterBuiltinKind 7 | from .mocks import K8sClientMock 8 | from .provisioner import Provisioner 9 | 10 | 11 | class TestProvisioner(unittest.TestCase): 12 | def setUp(self): 13 | settings.GET_ENVIRON_STRICT = False 14 | 15 | def test_deployment_wait_complete_fail(self): 16 | client = AdapterBuiltinKind( 17 | api=K8sClientMock('test1'), 18 | spec={'kind': 'Deployment', 'metadata': {'name': 'test1'}, 'spec': {'replicas': 1}}) 19 | with self.assertRaises(RuntimeError) as context: 20 | Provisioner('deploy', False, None)._wait_deployment_complete(client, tries=1, timeout=0) 21 | self.assertTrue('Deployment not completed for 1 tries' in str(context.exception), context.exception) 22 | 23 | def test_deployment_wait_complete(self): 24 | client = AdapterBuiltinKind( 25 | api=K8sClientMock('test2'), 26 | spec={'kind': 'Deployment', 'metadata': {'name': 'test1'}, 'spec': {'replicas': 1}}) 27 | Provisioner('deploy', False, None)._wait_deployment_complete(client, tries=1, timeout=0) 28 | 29 | def test_statefulset_wait_complete_fail(self): 30 | client = AdapterBuiltinKind(api=K8sClientMock('test1'), 31 | spec={'kind': 'StatefulSet', 'metadata': {'name': ''}, 'spec': {'replicas': 1}}) 32 | with self.assertRaises(RuntimeError) as context: 33 | Provisioner('deploy', False, None)._wait_statefulset_complete(client, tries=1, timeout=0) 34 | self.assertTrue('StatefulSet not completed for 1 tries' in str(context.exception), context.exception) 35 | 36 | def test_statefulset_wait_complete(self): 37 | client = AdapterBuiltinKind(api=K8sClientMock('test2'), 38 | spec={'kind': 'StatefulSet', 'metadata': {'name': ''}, 'spec': {'replicas': 3}}) 39 | Provisioner('deploy', False, None)._wait_statefulset_complete(client, tries=1, timeout=0) 40 | 41 | def 
test_daemonset_wait_complete_fail(self): 42 | client = AdapterBuiltinKind(api=K8sClientMock('test1'), 43 | spec={'kind': 'DaemonSet', 'metadata': {'name': ''}, 'spec': {'replicas': 1}}) 44 | with self.assertRaises(RuntimeError) as context: 45 | Provisioner('deploy', False, None)._wait_daemonset_complete(client, tries=1, timeout=0) 46 | self.assertTrue('DaemonSet not completed for 1 tries' in str(context.exception), context.exception) 47 | 48 | def test_daemonset_wait_complete(self): 49 | client = AdapterBuiltinKind(api=K8sClientMock('test2'), 50 | spec={'kind': 'DaemonSet', 'metadata': {'name': ''}, 'spec': {'replicas': 1}}) 51 | Provisioner('deploy', False, None)._wait_daemonset_complete(client, tries=1, timeout=0) 52 | 53 | def test_job_wait_complete_fail(self): 54 | client = AdapterBuiltinKind(api=K8sClientMock('test1'), 55 | spec={'kind': 'Job', 'metadata': {'name': ''}, 'spec': {'replicas': 1}}) 56 | with self.assertRaises(RuntimeError) as context: 57 | Provisioner('deploy', False, None)._wait_job_complete(client, tries=1, timeout=0) 58 | 59 | self.assertTrue('Job running failed' in str(context.exception)) 60 | 61 | def test_job_wait_complete_conditions_fail(self): 62 | client = AdapterBuiltinKind(api=K8sClientMock('test2'), 63 | spec={'kind': 'Job', 'metadata': {'name': ''}, 'spec': {'replicas': 1}}) 64 | with self.assertRaises(RuntimeError) as context: 65 | Provisioner('deploy', False, None)._wait_job_complete(client, tries=1, timeout=0) 66 | self.assertTrue('Job not completed for 1 tries' in str(context.exception), context.exception) 67 | 68 | def test_job_wait_complete(self): 69 | client = AdapterBuiltinKind(api=K8sClientMock('test3'), 70 | spec={'kind': 'Job', 'metadata': {'name': ''}, 'spec': {'replicas': 1}}) 71 | Provisioner('deploy', False, None)._wait_job_complete(client, tries=1, timeout=0) 72 | 73 | def test_ns_from_template(self): 74 | client = AdapterBuiltinKind(api=K8sClientMock('test'), 75 | spec={'kind': 'Job', 'metadata': {'name': '', 'namespace': 'test'}, 76 | 'spec': {'replicas': 1}}) 77 | self.assertEqual(client.namespace, 'test') 78 | 79 | def test_ns_from_config(self): 80 | settings.K8S_NAMESPACE = 'namespace' 81 | client = AdapterBuiltinKind(api=K8sClientMock('test'), 82 | spec={'kind': 'Job', 'metadata': {'name': ''}, 'spec': {'replicas': 1}}) 83 | self.assertEqual(client.namespace, 'namespace') 84 | 85 | def test_deployment_destruction_wait_fail(self): 86 | client = AdapterBuiltinKind( 87 | api=K8sClientMock('test1'), 88 | spec={'kind': 'Deployment', 'metadata': {'name': 'test1'}, 'spec': {'replicas': 1}}) 89 | with self.assertRaises(RuntimeError) as context: 90 | Provisioner('destroy', False, None)._wait_destruction_complete(client, 'Deployment', tries=1, timeout=0) 91 | self.assertTrue('Deployment destruction not completed for 1 tries' in str(context.exception), context.exception) 92 | 93 | def test_deployment_destruction_wait_success(self): 94 | client = AdapterBuiltinKind( 95 | api=K8sClientMock('404'), 96 | spec={'kind': 'Deployment', 'metadata': {'name': 'test1'}, 'spec': {'replicas': 1}}) 97 | Provisioner('destroy', False, None)._wait_destruction_complete(client, 'Deployment', tries=1, timeout=0) 98 | 99 | def test_job_destruction_wait_fail(self): 100 | client = AdapterBuiltinKind( 101 | api=K8sClientMock('test1'), 102 | spec={'kind': 'Job', 'metadata': {'name': 'test1'}, 'spec': {'replicas': 1}}) 103 | with self.assertRaises(RuntimeError) as context: 104 | Provisioner('deploy', True, None)._wait_destruction_complete(client, 'Job', tries=1, timeout=0) 
105 | self.assertTrue('Job destruction not completed for 1 tries' in str(context.exception), context.exception) 106 | 107 | def test_job_destruction_wait_success(self): 108 | client = AdapterBuiltinKind( 109 | api=K8sClientMock('404'), 110 | spec={'kind': 'Job', 'metadata': {'name': 'test1'}, 'spec': {'replicas': 1}}) 111 | Provisioner('destroy', False, None)._wait_destruction_complete(client, 'Job', tries=1, timeout=0) 112 | 113 | def test_deploy_replace(self): 114 | settings.CHECK_STATUS_TIMEOUT = 0 115 | Provisioner('deploy', False, None).run("k8s_handle/k8s/fixtures/deployment.yaml") 116 | 117 | def test_deploy_create(self): 118 | Provisioner('deploy', False, None).run("k8s_handle/k8s/fixtures/deployment_404.yaml") 119 | 120 | def test_deploy_unknown_api(self): 121 | with self.assertRaises(RuntimeError) as context: 122 | Provisioner('deploy', False, None).run("k8s_handle/k8s/fixtures/deployment_no_api.yaml") 123 | self.assertTrue('Unknown apiVersion "test" in template "k8s_handle/k8s/fixtures/deployment_no_api.yaml"' 124 | in str(context.exception), context.exception) 125 | 126 | def test_service_replace(self): 127 | Provisioner('deploy', False, None).run("k8s_handle/k8s/fixtures/service.yaml") 128 | 129 | def test_service_replace_no_ports(self): 130 | Provisioner('deploy', False, None).run("k8s_handle/k8s/fixtures/service_no_ports.yaml") 131 | 132 | def test_destroy_unknown_api(self): 133 | with self.assertRaises(RuntimeError) as context: 134 | Provisioner('destroy', False, None).run("k8s_handle/k8s/fixtures/deployment_no_api.yaml") 135 | self.assertTrue('Unknown apiVersion "test" in template "k8s_handle/k8s/fixtures/deployment_no_api.yaml"' 136 | in str(context.exception), context.exception) 137 | 138 | def test_destroy_not_found(self): 139 | Provisioner('destroy', False, None).run("k8s_handle/k8s/fixtures/deployment_404.yaml") 140 | 141 | def test_destroy_fail(self): 142 | with self.assertRaises(RuntimeError) as context: 143 | Provisioner('destroy', False, None).run("k8s_handle/k8s/fixtures/service.yaml") 144 | self.assertTrue('' in str(context.exception), context.exception) 145 | 146 | def test_destroy_success(self): 147 | Provisioner('destroy', False, None).run("k8s_handle/k8s/fixtures/deployment.yaml") 148 | 149 | def test_pvc_replace_equals(self): 150 | Provisioner('deploy', False, None).run("k8s_handle/k8s/fixtures/pvc.yaml") 151 | 152 | def test_pvc_replace_not_equals(self): 153 | with self.assertRaises(ProvisioningError) as context: 154 | Provisioner('deploy', False, None).run("k8s_handle/k8s/fixtures/pvc2.yaml") 155 | self.assertTrue('Replace persistent volume claim fail' in str(context.exception), context.exception) 156 | 157 | # https://kubernetes.io/docs/concepts/storage/persistent-volumes/#volume-mode 158 | def test_pvc_replace_new_attribute(self): 159 | with self.assertRaises(ProvisioningError) as context: 160 | Provisioner('deploy', False, None).run("k8s_handle/k8s/fixtures/pvc3.yaml") 161 | self.assertTrue('Replace persistent volume claim fail' 162 | in str(context.exception)) 163 | 164 | def test_get_template_contexts(self): 165 | with self.assertRaises(StopIteration): 166 | next(get_template_contexts('k8s_handle/k8s/fixtures/empty.yaml')) 167 | 168 | with self.assertRaises(RuntimeError) as context: 169 | next(get_template_contexts('k8s_handle/k8s/fixtures/nokind.yaml')) 170 | self.assertTrue( 171 | 'Field "kind" not found (or empty) in file "k8s_handle/k8s/fixtures/nokind.yaml"' in str(context.exception), 172 | context.exception) 173 | 174 | with 
self.assertRaises(RuntimeError) as context: 175 | next(get_template_contexts('k8s_handle/k8s/fixtures/nometadata.yaml')) 176 | self.assertTrue( 177 | 'Field "metadata" not found (or empty) in file "k8s_handle/k8s/fixtures/nometadata.yaml"' 178 | in str(context.exception), 179 | context.exception) 180 | 181 | with self.assertRaises(RuntimeError) as context: 182 | next(get_template_contexts('k8s_handle/k8s/fixtures/nometadataname.yaml')) 183 | self.assertTrue( 184 | 'Field "metadata->name" not found (or empty) in file "k8s_handle/k8s/fixtures/nometadataname.yaml"' 185 | in str(context.exception), context.exception) 186 | 187 | context = next(get_template_contexts('k8s_handle/k8s/fixtures/valid.yaml')) 188 | self.assertEqual(context.get('kind'), 'Service') 189 | self.assertEqual(context.get('apiVersion'), 'v1') 190 | self.assertEqual(context.get('metadata').get('name'), 'my-service') 191 | self.assertEqual(context.get('spec').get('selector').get('app'), 'my-app') 192 | 193 | context = next(get_template_contexts('k8s_handle/k8s/fixtures/deployment_wo_replicas.yaml')) 194 | self.assertEqual(context.get('spec').get('replicas'), 1) 195 | 196 | 197 | class TestKubeObject(unittest.TestCase): 198 | def test_replicas_equal(self): 199 | replicas = (1, 1, 1) 200 | self.assertTrue(Provisioner._replicas_count_are_equal(replicas)) 201 | 202 | def test_replicas_not_equal(self): 203 | replicas = (1, 1, 0) 204 | self.assertFalse(Provisioner._replicas_count_are_equal(replicas)) 205 | -------------------------------------------------------------------------------- /k8s_handle/k8s/test_warning_handler.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from .warning_handler import WarningHandler 4 | 5 | 6 | class TestWarningHandler(unittest.TestCase): 7 | def setUp(self): 8 | self.handler = WarningHandler() 9 | 10 | def test_handle_warning_header(self): 11 | with self.assertLogs("k8s_handle.k8s.warning_handler", level="WARNING") as cm: 12 | self.handler.handle_warning_header(299, "-", "warning") 13 | 14 | self.assertEqual(cm.output, ['WARNING:k8s_handle.k8s.warning_handler:\x1b[33;1m\n' 15 | ' ▄▄\n' 16 | ' ████\n' 17 | ' ██▀▀██\n' 18 | ' ███ ███ warning\n' 19 | ' ████▄▄████\n' 20 | ' █████ █████\n' 21 | ' ██████████████\n' 22 | '\x1b[0m']) 23 | 24 | def test_handle_warning_header_with_unexpected_code(self): 25 | with self.assertNoLogs("k8s_handle.k8s.warning_handler", level="WARNING"): 26 | self.handler.handle_warning_header(0, "-", "warning") 27 | 28 | def test_handle_warning_header_with_empty_message(self): 29 | with self.assertNoLogs("k8s_handle.k8s.warning_handler", level="WARNING"): 30 | self.handler.handle_warning_header(299, "-", "") 31 | 32 | def test_handle_warning_header_with_duplicate_messages(self): 33 | with self.assertLogs("k8s_handle.k8s.warning_handler", level="WARNING"): 34 | self.handler.handle_warning_header(299, "-", "warning") 35 | 36 | with self.assertNoLogs("k8s_handle.k8s.warning_handler", level="WARNING"): 37 | self.handler.handle_warning_header(299, "-", "warning") 38 | -------------------------------------------------------------------------------- /k8s_handle/k8s/warning_handler.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | log = logging.getLogger(__name__) 4 | 5 | YELLOW_COLOR = "\u001b[33;1m" 6 | RESET_COLOR = "\u001b[0m" 7 | 8 | WARNING_TEMPLATE = """ 9 | ▄▄ 10 | ████ 11 | ██▀▀██ 12 | ███ ███ {text} 13 | ████▄▄████ 14 | █████ █████ 15 | ██████████████ 16 | 
""" 17 | 18 | 19 | class WarningHandler(): 20 | def __init__(self): 21 | self.written = [] 22 | 23 | def handle_warning_header(self, code, agent, text): 24 | if code != 299 or len(text) == 0: 25 | return 26 | 27 | if text in self.written: 28 | return 29 | 30 | log.warning(self._yellow(WARNING_TEMPLATE.format(text=text))) 31 | 32 | self.written += [text] 33 | 34 | @staticmethod 35 | def _yellow(str): 36 | return f"{YELLOW_COLOR}{str}{RESET_COLOR}" 37 | -------------------------------------------------------------------------------- /k8s_handle/settings.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | LOG_LEVEL = os.environ.get('LOG_LEVEL', 'INFO') 4 | LOG_FORMAT = '%(asctime)s %(levelname)s:%(name)s:%(message)s' 5 | LOG_DATE_FORMAT = '%Y-%m-%d %H:%M:%S' 6 | 7 | CONFIG_FILE = os.environ.get('CONFIG_FILE', 'config.yaml') 8 | 9 | COMMON_SECTION_NAME = 'common' 10 | TEMPLATES_DIR = os.environ.get('TEMPLATES_DIR', 'templates') 11 | 12 | K8S_CONFIG_DIR = os.environ.get('K8S_CONFIG_DIR', '{}/.kube/'.format(os.path.expanduser('~'))) 13 | 14 | K8S_NAMESPACE = None 15 | 16 | TEMP_DIR = os.environ.get('TEMP_DIR', '/tmp/k8s-handle') 17 | 18 | CHECK_STATUS_TRIES = 360 19 | CHECK_STATUS_TIMEOUT = 5 20 | 21 | CHECK_CONTAINERS_IN_POD_TRIES = 360 22 | CHECK_CONTAINERS_IN_POD_TIMEOUT = 5 23 | 24 | CHECK_POD_STATUS_TRIES = 360 25 | CHECK_POD_STATUS_TIMEOUT = 5 26 | 27 | CHECK_DAEMONSET_STATUS_TRIES = 10 28 | CHECK_DAEMONSET_STATUS_TIMEOUT = 5 29 | 30 | COUNT_LOG_LINES = None 31 | 32 | GET_ENVIRON_STRICT = False 33 | -------------------------------------------------------------------------------- /k8s_handle/templating.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import glob 3 | import itertools 4 | import logging 5 | import os 6 | import re 7 | from hashlib import sha256 8 | 9 | import yaml 10 | from jinja2 import Environment, FileSystemLoader, StrictUndefined 11 | from jinja2.exceptions import TemplateNotFound, TemplateSyntaxError, UndefinedError 12 | 13 | from k8s_handle import settings 14 | from k8s_handle.exceptions import TemplateRenderingError 15 | 16 | log = logging.getLogger(__name__) 17 | 18 | 19 | def get_template_contexts(file_path): 20 | try: 21 | with open(file_path) as f: 22 | try: 23 | contexts = yaml.safe_load_all(f.read()) 24 | except Exception as e: 25 | raise RuntimeError('Unable to load yaml file: {}, {}'.format(file_path, e)) 26 | 27 | for context in contexts: 28 | if context is None: 29 | continue # Skip empty YAML documents 30 | if 'kind' not in context or context['kind'] is None: 31 | raise RuntimeError('Field "kind" not found (or empty) in file "{}"'.format(file_path)) 32 | if 'metadata' not in context or context['metadata'] is None: 33 | raise RuntimeError('Field "metadata" not found (or empty) in file "{}"'.format(file_path)) 34 | if 'name' not in context['metadata'] or context['metadata']['name'] is None: 35 | raise RuntimeError('Field "metadata->name" not found (or empty) in file "{}"'.format(file_path)) 36 | if 'spec' in context: 37 | # INFO: Set replicas = 1 by default for replaces cases in Deployment and StatefulSet 38 | if 'replicas' not in context['spec'] or context['spec']['replicas'] is None: 39 | if context['kind'] in ['Deployment', 'StatefulSet']: 40 | context['spec']['replicas'] = 1 41 | yield context 42 | except FileNotFoundError as e: 43 | raise RuntimeError(e) 44 | 45 | 46 | def b64decode(string): 47 | res = base64.decodebytes(string.encode()) 48 | 
return res.decode() 49 | 50 | 51 | def b64encode(string): 52 | res = base64.b64encode(string.encode()) 53 | return res.decode() 54 | 55 | 56 | def hash_sha256(string): 57 | res = sha256() 58 | res.update(string.encode('utf-8')) 59 | return res.hexdigest() 60 | 61 | 62 | def to_yaml(data, flow_style=True, width=99999): 63 | return yaml.safe_dump(data, default_flow_style=flow_style, width=width) 64 | 65 | 66 | def get_env(templates_dir): 67 | # https://stackoverflow.com/questions/9767585/insert-static-files-literally-into-jinja-templates-without-parsing-them 68 | def include_file(path): 69 | path = os.path.join(templates_dir, '../', path) 70 | output = [] 71 | for file_path in sorted(glob.glob(path)): 72 | with open(file_path, 'r') as f: 73 | output.append(f.read()) 74 | return '\n'.join(output) 75 | 76 | def list_files(path): 77 | path = os.path.join(templates_dir, '../', path) 78 | if os.path.isdir(path): 79 | files = [os.path.join(path, f) for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))] 80 | else: 81 | files = glob.glob(path) 82 | return sorted(files) 83 | 84 | env = Environment( 85 | undefined=StrictUndefined, 86 | loader=FileSystemLoader([templates_dir])) 87 | 88 | env.filters['b64decode'] = b64decode 89 | env.filters['b64encode'] = b64encode 90 | env.filters['hash_sha256'] = hash_sha256 91 | env.filters['to_yaml'] = to_yaml 92 | env.globals['include_file'] = include_file 93 | env.globals['list_files'] = list_files 94 | 95 | log.debug('Available templates in path {}: {}'.format(templates_dir, env.list_templates())) 96 | return env 97 | 98 | 99 | class Renderer: 100 | def __init__(self, templates_dir, tags=None, tags_skip=None): 101 | self._templates_dir = templates_dir 102 | self._tags = tags 103 | self._tags_skip = tags_skip 104 | self._env = get_env(self._templates_dir) 105 | 106 | def _iterate_entries(self, entries, tags=None): 107 | if tags is None: 108 | tags = set() 109 | 110 | for entry in entries: 111 | entry["tags"] = self._get_template_tags(entry).union(tags) 112 | 113 | if "group" not in entry.keys(): 114 | if not self._evaluate_tags(entry.get("tags"), self._tags, self._tags_skip): 115 | continue 116 | yield entry 117 | 118 | for nested_entry in self._iterate_entries(entry.get("group", []), entry.get("tags")): 119 | yield nested_entry 120 | 121 | def _preprocess_templates(self, templates): 122 | output = [] 123 | for template in templates: 124 | tags = template.get('tags', []) 125 | new_templates = [] 126 | try: 127 | regex = re.compile(template.get('template')) 128 | new_templates = list( 129 | map(lambda x: {'template': x, 'tags': tags}, filter(regex.search, self._env.list_templates()))) 130 | except Exception as e: 131 | log.warning(f'Exception during preprocess {template}, {e}, passing it as is') 132 | 133 | if len(new_templates) == 0: 134 | output.append(template) 135 | else: 136 | output += new_templates 137 | return output 138 | 139 | def generate_by_context(self, context): 140 | if context is None: 141 | raise RuntimeError('Can\'t generate templates from None context') 142 | 143 | templates = self._preprocess_templates(context.get('templates', [])) 144 | if len(templates) == 0: 145 | templates = context.get('kubectl', []) 146 | if len(templates) == 0: 147 | return 148 | 149 | output = [] 150 | for template in self._iterate_entries(templates): 151 | try: 152 | path = self._generate_file(template, settings.TEMP_DIR, context) 153 | log.info('File "{}" successfully generated'.format(path)) 154 | output.append(path) 155 | except TemplateNotFound as e: 
156 | raise TemplateRenderingError( 157 | "Processing {}: template {} hasn't been found".format(template['template'], e.name)) 158 | except (UndefinedError, TemplateSyntaxError) as e: 159 | raise TemplateRenderingError('Unable to render {}, due to: {}'.format(template, e)) 160 | return output 161 | 162 | def _generate_file(self, item, directory, context): 163 | try: 164 | log.info('Trying to generate file from template "{}" in "{}"'.format(item['template'], directory)) 165 | template = self._env.get_template(item['template']) 166 | except TemplateNotFound as e: 167 | log.info('Templates path: {}, available templates: {}'.format(self._templates_dir, 168 | self._env.list_templates())) 169 | raise e 170 | except KeyError: 171 | raise RuntimeError('Templates section doesn\'t have any template items') 172 | 173 | new_name = item['template'].replace('.j2', '') 174 | path = os.path.join(directory, new_name) 175 | 176 | try: 177 | if not os.path.exists(os.path.dirname(path)): 178 | os.makedirs(os.path.dirname(path)) 179 | 180 | with open(path, 'w+') as f: 181 | f.write(template.render(context)) 182 | 183 | except TemplateRenderingError: 184 | raise 185 | except (FileNotFoundError, PermissionError) as e: 186 | raise RuntimeError(e) 187 | 188 | return path 189 | 190 | @staticmethod 191 | def _get_template_tags(template): 192 | if 'tags' not in template: 193 | return set() 194 | 195 | tags = template['tags'] 196 | 197 | if isinstance(tags, list): 198 | return set([i for i, _ in itertools.groupby(tags)]) 199 | 200 | if isinstance(tags, str): 201 | return set(tags.split(',')) 202 | 203 | raise TypeError('Unable to parse tags of "{}" template: unexpected type {}'.format(template, type(tags))) 204 | 205 | @staticmethod 206 | def _evaluate_tags(tags, only_tags, skip_tags): 207 | if only_tags and tags.isdisjoint(only_tags): 208 | return False 209 | 210 | if skip_tags and not tags.isdisjoint(skip_tags): 211 | return False 212 | 213 | return True 214 | -------------------------------------------------------------------------------- /k8s_handle/transforms.py: -------------------------------------------------------------------------------- 1 | import json 2 | import re 3 | 4 | 5 | def split_str_by_capital_letters(item): 6 | # upper the first letter 7 | item = item[0].upper() + item[1:] 8 | # transform 'Service' to 'service', 'CronJob' to 'cron_job', 'TargetPort' to 'target_port', etc. 
9 | return '_'.join(re.findall(r'[A-Z][^A-Z]*', item)).lower() 10 | 11 | 12 | def add_indent(json_str): 13 | try: 14 | return json.dumps(json.loads(json_str), indent=4) 15 | except: # NOQA 16 | return json_str 17 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | requests==2.32.2 2 | jinja2==3.1.6 3 | PyYAML==6.0.1 4 | kubernetes==27.2.0 5 | semver==3.0.1 6 | urllib3==1.26.19 7 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | from setuptools import setup, find_packages 3 | 4 | readme_path = os.path.join( 5 | os.path.dirname(os.path.abspath(__file__)), 6 | 'README.md') 7 | 8 | 9 | def get_content(path): 10 | with open(path, 'r') as f: 11 | return f.read() 12 | 13 | 14 | setup(name='k8s-handle', 15 | version=os.environ.get('RELEASE_TAG', '0.0.0'), 16 | long_description=get_content(readme_path), 17 | long_description_content_type='text/markdown', 18 | description='Provisioning tool for Kubernetes apps', 19 | url='http://github.com/2gis/k8s-handle', 20 | author='Vadim Reyder', 21 | author_email='vadim.reyder@gmail.com', 22 | license='Apache 2.0', 23 | packages=find_packages(exclude=("tests",)), 24 | data_files=['requirements.txt'], 25 | entry_points={ 26 | "console_scripts": [ 27 | "k8s-handle=k8s_handle:main", 28 | ] 29 | }, 30 | install_requires=get_content('requirements.txt').split('\n'), 31 | zip_safe=False) 32 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/2gis/k8s-handle/144fab41f5c59f458333a9e50777641b96b651de/tests/__init__.py -------------------------------------------------------------------------------- /tests/fixtures/config.yaml: -------------------------------------------------------------------------------- 1 | common: 2 | my_var: my_value 3 | my_file: "{{ file='tests/fixtures/include.yaml' }}" 4 | my_env_var: "{{ env='CUSTOM_ENV' }}" 5 | my_conc_env_var1: "prefix-{{ env='CUSTOM_ENV' }}-postfix" 6 | my_conc_env_var2: "prefix-{{ env='CUSTOM_ENV' }}" 7 | my_conc_env_var3: "{{ env='CUSTOM_ENV' }}-postfix" 8 | empty_var: "{{ env='EMPTY_ENV' }}" 9 | k8s_master_uri: https://localhost:5000 10 | k8s_token: token 11 | k8s_ca_base64: ca 12 | k8s_namespace: namespace 13 | 14 | var: 15 | router: 16 | your: 1 17 | my1: var1 18 | 19 | test_recursive_vars: 20 | templates: 21 | - template: template1.yaml.j2 22 | var: 23 | router: 24 | my: var 25 | your: 2 26 | 27 | test_dirs: 28 | dirs: 29 | - dst: /tmp/test_directory1/ 30 | - dst: /tmp/test_directory2/ 31 | templates: 32 | - template: template1.yaml.j2 33 | - template: template2.yaml.j2 34 | - template: template3.yaml.j2 35 | - template: template_include_file.yaml.j2 36 | - template: innerdir/template1.yaml.j2 37 | - template: template_list_files.yaml.j2 38 | 39 | no_templates: 40 | templates: 41 | - namespace: io 42 | 43 | no_templates_section: 44 | new_var: 123 45 | 46 | deployment: 47 | templates: 48 | - template: deployment.yaml.j2 49 | 50 | deployment_without_name: 51 | templates: 52 | - template: deployment.json.j2 53 | 54 | service: 55 | templates: 56 | - template: service.yaml.j2 57 | 58 | io_2709: 59 | templates: 60 | - template: template4.yaml.j2 61 | 62 | not_existent_template: 63 | 
templates: 64 | - template: doesnotexist.yaml.j2 65 | 66 | templates_regex: 67 | templates: 68 | - template: innerdir/.*\.j2 69 | - template: template1.yaml.j2 70 | 71 | templates_regex_invalid: 72 | templates: 73 | - template: '[' 74 | - template: template1.yaml.j2 75 | 76 | test_filters: 77 | to_base64: hello world 78 | from_base64: azhzLWhhbmRsZQ== 79 | to_sha256: k8s-hanle 80 | affinity: 81 | - key: "dedicated" 82 | operator: "Equal" 83 | value: "monitoring" 84 | effect: "NoSchedule" 85 | - key: "dedicated" 86 | operator: "Equal" 87 | value: 88 | hello: world 89 | effect: "NoSchedule" 90 | templates: 91 | - template: filters.yaml.j2 92 | 93 | section_with_kubectl: 94 | dirs: 95 | - dst: /tmp/test_directory1/ 96 | - dst: /tmp/test_directory2/ 97 | kubectl: 98 | - template: template1.yaml.j2 99 | - template: template2.yaml.j2 100 | - template: template3.yaml.j2 101 | - template: innerdir/template1.yaml.j2 102 | 103 | test_groups: 104 | templates: 105 | - template: template1.yaml.j2 106 | - group: 107 | - group: 108 | - template: template2.yaml.j2 109 | - template: template3.yaml.j2 110 | 111 | test_dashes: 112 | templates: 113 | - template: template-dashes.yaml.j2 114 | region: us-east1 115 | regionMap: 116 | us-east1: do this 117 | us-west1: do that 118 | 119 | -------------------------------------------------------------------------------- /tests/fixtures/config_with_env_vars.yaml: -------------------------------------------------------------------------------- 1 | common: 2 | k8s_master_uri: https://localhost:5000 3 | k8s_token: token 4 | k8s_namespace: namespace 5 | 6 | section-1: 7 | var: "{{ env='SECTION1' }}" 8 | templates: 9 | - template: template1.yaml.j2 10 | 11 | section-2: 12 | var: "{{ env='SECTION2' }}" 13 | template: 14 | - template: template1.yaml.j2 15 | -------------------------------------------------------------------------------- /tests/fixtures/config_with_include_and_env_vars.yaml: -------------------------------------------------------------------------------- 1 | common: 2 | k8s_master_uri: https://localhost:5000 3 | k8s_token: token 4 | k8s_ca_base64: ca 5 | k8s_namespace: namespace 6 | 7 | section-1: 8 | var: "{{ file='tests/fixtures/include_2levels_with_env.yaml' }}" 9 | kubectl: 10 | - template: template1.yaml.j2 11 | 12 | section-2: 13 | var: "{{ file='tests/fixtures/include_with_env.yaml' }}" 14 | kubectl: 15 | - template: template2.yaml.j2 16 | 17 | section-3: 18 | var: "{{ file='tests/fixtures/incorrect_include.yaml' }}" 19 | kubectl: 20 | - template: template2.yaml.j2 21 | -------------------------------------------------------------------------------- /tests/fixtures/config_without_k8s.yaml: -------------------------------------------------------------------------------- 1 | common: 2 | my_var: my_value 3 | my_env_var: "{{ env='CUSTOM_ENV' }}" 4 | k8s_master_uri: https://localhost:5000 5 | k8s_namespace: namespace 6 | k8s_token: "" 7 | 8 | deployment: 9 | templates: 10 | - template: deployment.yaml.j2 11 | -------------------------------------------------------------------------------- /tests/fixtures/dashes_config.yaml: -------------------------------------------------------------------------------- 1 | common: 2 | my_file: "{{ file='tests/fixtures/include.yaml' }}" 3 | k8s_master_uri: https://localhost:5000 4 | k8s_token: token 5 | k8s_ca_base64: ca 6 | k8s_namespace: namespace 7 | var: 8 | router: 9 | your-var: 1 10 | my1: var1 11 | templates: 12 | - template: template1.yaml.j2 13 | 14 | allowed: 15 | var: 16 | router: 17 | my-nested-var: var 18 | 
your: 2 19 | 20 | not_allowed: 21 | my-var: my_value 22 | my-var-with-dashes: not allowed 23 | 24 | -------------------------------------------------------------------------------- /tests/fixtures/empty_config.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/2gis/k8s-handle/144fab41f5c59f458333a9e50777641b96b651de/tests/fixtures/empty_config.yaml -------------------------------------------------------------------------------- /tests/fixtures/include.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | ha_ha: "included_var" 3 | -------------------------------------------------------------------------------- /tests/fixtures/include_2levels_with_env.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | included_file: "{{ file='tests/fixtures/include_with_env.yaml' }}" 3 | -------------------------------------------------------------------------------- /tests/fixtures/include_with_env.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | included_env: "{{ env='INC_ENV' }}" 3 | -------------------------------------------------------------------------------- /tests/fixtures/incorrect_config.yaml: -------------------------------------------------------------------------------- 1 | [{]}] -------------------------------------------------------------------------------- /tests/fixtures/incorrect_include.yaml: -------------------------------------------------------------------------------- 1 | 2 | --- 3 | included_file: "{{ file='tests/fixtures/incorrect_include2.yaml' }}" 4 | -------------------------------------------------------------------------------- /tests/fixtures/incorrect_include2.yaml: -------------------------------------------------------------------------------- 1 | 2 | --- 3 | included_file: "{{ file='tests/fixtures/incorrect_include.yaml' }}" 4 | -------------------------------------------------------------------------------- /tests/templates_tests/filters.yaml.j2: -------------------------------------------------------------------------------- 1 | b64encode: {{ to_base64 | b64encode }} 2 | b64decode: {{ from_base64 | b64decode }} 3 | sha256: {{ to_sha256 | hash_sha256 }} 4 | affinity: {{ affinity | to_yaml() }} 5 | -------------------------------------------------------------------------------- /tests/templates_tests/innerdir/template1.yaml.j2: -------------------------------------------------------------------------------- 1 | {{ my_file }} -------------------------------------------------------------------------------- /tests/templates_tests/my_file.txt: -------------------------------------------------------------------------------- 1 | {{ hello world }} 2 | new 3 | line -------------------------------------------------------------------------------- /tests/templates_tests/my_file1.txt: -------------------------------------------------------------------------------- 1 | {{ hello world1 }} 2 | -------------------------------------------------------------------------------- /tests/templates_tests/template-dashes.yaml.j2: -------------------------------------------------------------------------------- 1 | {{ regionMap[region] }} 2 | -------------------------------------------------------------------------------- /tests/templates_tests/template1.yaml.j2: -------------------------------------------------------------------------------- 1 | {{ my_file }} 
-------------------------------------------------------------------------------- /tests/templates_tests/template2.yaml.j2: -------------------------------------------------------------------------------- 1 | {{ my_env_var | b64encode }} 2 | -------------------------------------------------------------------------------- /tests/templates_tests/template3.yaml.j2: -------------------------------------------------------------------------------- 1 | {{ 'TXkgdmFsdWU=' | b64decode }} 2 | -------------------------------------------------------------------------------- /tests/templates_tests/template4.yaml.j2: -------------------------------------------------------------------------------- 1 | kind: MyResource 2 | metadata: 3 | name: MyName 4 | {{ undefined_variable }} 5 | -------------------------------------------------------------------------------- /tests/templates_tests/template_include_file.yaml.j2: -------------------------------------------------------------------------------- 1 | test: | 2 | {{ include_file('templates_tests/my_file*.txt') | indent(2) }} 3 | -------------------------------------------------------------------------------- /tests/templates_tests/template_list_files.yaml.j2: -------------------------------------------------------------------------------- 1 | test: | 2 | {% for f in list_files('templates_tests/innerdir') -%} 3 | {{ f.split('/')[-1] }}: 4 | {% endfor -%} 5 | {% for f in list_files('templates_tests/my_file*.txt') -%} 6 | {{ f.split('/')[-1] }}: 7 | {% endfor -%} 8 | -------------------------------------------------------------------------------- /tests/test_config.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import unittest 4 | 5 | from k8s_handle import config 6 | from k8s_handle import settings 7 | from k8s_handle.config import KEY_K8S_CA_BASE64 8 | from k8s_handle.config import KEY_K8S_MASTER_URI 9 | from k8s_handle.config import KEY_K8S_NAMESPACE 10 | from k8s_handle.config import KEY_K8S_NAMESPACE_ENV 11 | from k8s_handle.config import KEY_K8S_TOKEN 12 | from k8s_handle.config import KEY_K8S_CA_BASE64_URI_ENV_DEPRECATED 13 | from k8s_handle.config import KEY_K8S_HANDLE_DEBUG 14 | from k8s_handle.config import KEY_K8S_MASTER_URI_ENV_DEPRECATED 15 | from k8s_handle.config import PriorityEvaluator 16 | from k8s_handle.filesystem import InvalidYamlError 17 | 18 | VALUE_CLI = 'value_cli' 19 | VALUE_CONTEXT = 'value_context' 20 | VALUE_ENV = 'value_env' 21 | VALUE_ENV_DEPRECATED = 'value_env_deprecated' 22 | VALUE_CA = 'Q0EK' 23 | VALUE_TOKEN = 'token' 24 | KUBECONFIG_NAMESPACE = 'kubeconfig_namespace' 25 | 26 | 27 | class TestContextGeneration(unittest.TestCase): 28 | def setUp(self): 29 | settings.CONFIG_FILE = 'tests/fixtures/config.yaml' 30 | settings.TEMPLATES_DIR = 'templates/tests' 31 | os.environ['CUSTOM_ENV'] = 'My value' 32 | os.environ['K8S_CONFIG_DIR'] = '/tmp/kube/' 33 | 34 | def tearDown(self): 35 | if os.path.exists(settings.TEMP_DIR): 36 | shutil.rmtree(settings.TEMP_DIR) 37 | 38 | os.environ.pop('CUSTOM_ENV') 39 | os.environ.pop('K8S_CONFIG_DIR') 40 | 41 | def test_config_not_exist(self): 42 | settings.CONFIG_FILE = 'tests/config.yaml' 43 | with self.assertRaises(Exception) as context: 44 | config.load_context_section('section') 45 | self.assertTrue('No such file or directory: \'tests/config.yaml\'' in str(context.exception)) 46 | 47 | def test_config_is_empty(self): 48 | settings.CONFIG_FILE = 'tests/fixtures/empty_config.yaml' 49 | with self.assertRaises(RuntimeError) as 
context: 50 | config.load_context_section('section') 51 | self.assertTrue('Config file "tests/fixtures/empty_config.yaml" is empty' in str(context.exception)) 52 | 53 | def test_config_incorrect(self): 54 | settings.CONFIG_FILE = 'tests/fixtures/incorrect_config.yaml' 55 | with self.assertRaises(InvalidYamlError): 56 | config.load_context_section('section') 57 | 58 | def test_not_existed_section(self): 59 | with self.assertRaises(RuntimeError) as context: 60 | config.load_context_section('absent_section') 61 | self.assertTrue('Section "absent_section" not found' in str(context.exception)) 62 | 63 | def test_no_templates(self): 64 | with self.assertRaises(RuntimeError) as context: 65 | config.load_context_section('no_templates_section') 66 | self.assertTrue('Section "templates" or "kubectl" not found in config file' in str(context.exception)) 67 | 68 | def test_empty_section(self): 69 | with self.assertRaises(RuntimeError) as context: 70 | config.load_context_section('') 71 | self.assertEqual('Empty section specification is not allowed', str(context.exception)) 72 | 73 | def test_common_section(self): 74 | with self.assertRaises(RuntimeError) as context: 75 | config.load_context_section(settings.COMMON_SECTION_NAME) 76 | self.assertTrue('Section "{}" is not intended to deploy'.format( 77 | settings.COMMON_SECTION_NAME) in str(context.exception)) 78 | 79 | def test_merge_section_options(self): 80 | settings.TEMPLATES_DIR = 'templates_tests' 81 | c = config.load_context_section('test_dirs') 82 | self.assertEqual(c['my_var'], 'my_value') 83 | self.assertEqual(c['my_env_var'], 'My value') 84 | self.assertEqual(c['my_file'], 85 | {'ha_ha': 'included_var'}) 86 | self.assertTrue(c['dirs']) 87 | 88 | def test_recursive_vars(self): 89 | settings.TEMPLATES_DIR = 'templates_tests' 90 | c = config.load_context_section('test_recursive_vars') 91 | self.assertEqual({'router': { 92 | 'my': 'var', 93 | 'my1': 'var1', 94 | 'your': 2 95 | }}, c['var']) 96 | 97 | def test_concatination_with_env(self): 98 | settings.TEMPLATES_DIR = 'templates_tests' 99 | c = config.load_context_section('test_dirs') 100 | self.assertEqual(c['my_conc_env_var1'], 101 | 'prefix-My value-postfix') 102 | self.assertEqual(c['my_conc_env_var2'], 103 | 'prefix-My value') 104 | self.assertEqual(c['my_conc_env_var3'], 105 | 'My value-postfix') 106 | 107 | def test_dashes_in_var_names(self): 108 | settings.TEMPLATES_DIR = 'templates_tests' 109 | settings.CONFIG_FILE = 'tests/fixtures/dashes_config.yaml' 110 | with self.assertRaises(RuntimeError) as context: 111 | config.load_context_section('not_allowed') 112 | self.assertTrue('Root variable names should never include dashes, ' 113 | 'check your vars please: my-var, my-var-with-dashes' 114 | in str(context.exception), context.exception) 115 | c = config.load_context_section('allowed') 116 | self.assertEqual(c.get('var').get('router').get('your'), 2) 117 | 118 | def test_context_update_recursion(self): 119 | my_dict = { 120 | 'section1': { 121 | 'subsection1': { 122 | 'section1-key1': 'value', 123 | 'section1-key2': 1, 124 | 'section1-key3': 0.1, 125 | 'section1-key4': [0, 1, 2, 3], 126 | 'section1-key5': "{{ env='CUSTOM_ENV' }}", 127 | 'section1-key6': "{{ file='tests/fixtures/include.yaml' }}", 128 | 'section1-key7': "{{ env='CUSTOM_ENV'}} = {{ env='CUSTOM_ENV' }}", 129 | 'section1-key8': "{{ env='NULL_VAR' }}-{{ env='CUSTOM_ENV' }}" 130 | } 131 | }, 132 | 'section2': [ 133 | {}, 134 | 'var2', 135 | 'var3', 136 | '{{ env=\'CUSTOM_ENV\' }}', 137 | '{{ env=\'CUSTOM_ENV\' }} = {{ 
env=\'CUSTOM_ENV\' }}', 138 | '{{ env=\'NULL_VAR\' }}-{{ env=\'CUSTOM_ENV\' }}' 139 | ], 140 | 'section3': [0, 1, 2, 3, 4] 141 | } 142 | expected_dict = { 143 | 'section1': { 144 | 'subsection1': { 145 | 'section1-key1': 'value', 146 | 'section1-key2': 1, 147 | 'section1-key3': 0.1, 148 | 'section1-key4': [0, 1, 2, 3], 149 | 'section1-key5': 'My value', 150 | 'section1-key6': {'ha_ha': 'included_var'}, 151 | 'section1-key7': 'My value = My value', 152 | 'section1-key8': "-My value" 153 | } 154 | }, 155 | 'section2': [ 156 | {}, 157 | 'var2', 158 | 'var3', 159 | 'My value', 160 | 'My value = My value', 161 | '-My value' 162 | ], 163 | 'section3': [0, 1, 2, 3, 4] 164 | } 165 | self.assertDictEqual(expected_dict, config._update_context_recursively(my_dict)) 166 | 167 | def test_context_update_section(self): 168 | output = config._update_context_recursively('123') 169 | self.assertEqual('123', output) 170 | 171 | def test_env_var_in_section1_dont_set(self): 172 | settings.CONFIG_FILE = 'tests/fixtures/config_with_env_vars.yaml' 173 | settings.GET_ENVIRON_STRICT = True 174 | with self.assertRaises(RuntimeError) as context: 175 | config.load_context_section('section-1') 176 | 177 | settings.GET_ENVIRON_STRICT = False 178 | self.assertTrue('Environment variable "SECTION1" is not set' 179 | in str(context.exception)) 180 | 181 | def test_env_var_in_section2_dont_set(self): 182 | settings.CONFIG_FILE = 'tests/fixtures/config_with_env_vars.yaml' 183 | settings.GET_ENVIRON_STRICT = True 184 | with self.assertRaises(RuntimeError) as context: 185 | config.load_context_section('section-2') 186 | 187 | settings.GET_ENVIRON_STRICT = False 188 | self.assertTrue('Environment variable "SECTION2" is not set' in str(context.exception)) 189 | 190 | def test_env_var_in_include_dont_set(self): 191 | settings.CONFIG_FILE = 'tests/fixtures/config_with_include_and_env_vars.yaml' 192 | settings.GET_ENVIRON_STRICT = True 193 | with self.assertRaises(RuntimeError): 194 | config.load_context_section('section-2') 195 | 196 | settings.GET_ENVIRON_STRICT = False 197 | 198 | def test_env_var_in_include_2_levels_dont_set(self): 199 | settings.CONFIG_FILE = 'tests/fixtures/config_with_include_and_env_vars.yaml' 200 | settings.GET_ENVIRON_STRICT = True 201 | with self.assertRaises(RuntimeError): 202 | config.load_context_section('section-1') 203 | 204 | settings.GET_ENVIRON_STRICT = False 205 | 206 | def test_infinite_recursion_loop(self): 207 | settings.CONFIG_FILE = 'tests/fixtures/config_with_include_and_env_vars.yaml' 208 | with self.assertRaises(RuntimeError): 209 | config.load_context_section('section-3') 210 | 211 | def test_check_empty_var(self): 212 | settings.CONFIG_FILE = 'tests/fixtures/config.yaml' 213 | settings.GET_ENVIRON_STRICT = True 214 | with self.assertRaises(RuntimeError) as context: 215 | config.load_context_section('deployment') 216 | settings.GET_ENVIRON_STRICT = False 217 | self.assertTrue('Environment variable "EMPTY_ENV" is not set' 218 | in str(context.exception)) 219 | 220 | 221 | class TestPriorityEvaluation(unittest.TestCase): 222 | def test_first_none_argument(self): 223 | self.assertIsNone(PriorityEvaluator._first()) 224 | self.assertIsNone(PriorityEvaluator._first(None)) 225 | self.assertIsNone(PriorityEvaluator._first(None, False, 0, "", {}, [])) 226 | 227 | def test_first_priority(self): 228 | self.assertEqual(PriorityEvaluator._first(VALUE_CLI, VALUE_ENV), VALUE_CLI) 229 | self.assertEqual(PriorityEvaluator._first("", VALUE_ENV), VALUE_ENV) 230 | 231 | def test_k8s_namespace_default(self): 
232 | context = {KEY_K8S_NAMESPACE: VALUE_CONTEXT} 233 | env = {KEY_K8S_NAMESPACE_ENV: VALUE_ENV} 234 | evaluator = PriorityEvaluator({}, context, env) 235 | 236 | self.assertEqual(evaluator.k8s_namespace_default(), VALUE_CONTEXT) 237 | self.assertEqual(evaluator.k8s_namespace_default(KUBECONFIG_NAMESPACE), VALUE_CONTEXT) 238 | 239 | context.pop(KEY_K8S_NAMESPACE) 240 | self.assertEqual(evaluator.k8s_namespace_default(), VALUE_ENV) 241 | self.assertEqual(evaluator.k8s_namespace_default(KUBECONFIG_NAMESPACE), KUBECONFIG_NAMESPACE) 242 | 243 | def test_k8s_client_configuration_missing_uri(self): 244 | evaluator = PriorityEvaluator({KEY_K8S_CA_BASE64: VALUE_CLI, KEY_K8S_TOKEN: VALUE_CLI}, {}, {}) 245 | 246 | with self.assertRaises(RuntimeError): 247 | evaluator.k8s_client_configuration() 248 | 249 | def test_k8s_client_configuration_missing_token(self): 250 | evaluator = PriorityEvaluator({ 251 | KEY_K8S_MASTER_URI: VALUE_CLI, 252 | KEY_K8S_CA_BASE64: VALUE_CLI, 253 | }, {}, {}) 254 | 255 | with self.assertRaises(RuntimeError): 256 | evaluator.k8s_client_configuration() 257 | 258 | def test_k8s_client_configuration_success(self): 259 | evaluator = PriorityEvaluator({ 260 | KEY_K8S_MASTER_URI: VALUE_CLI, 261 | KEY_K8S_CA_BASE64: VALUE_CA, 262 | KEY_K8S_TOKEN: VALUE_TOKEN, 263 | }, {}, {}) 264 | configuration = evaluator.k8s_client_configuration() 265 | self.assertEqual(configuration.host, VALUE_CLI) 266 | self.assertEqual(configuration.api_key, {'authorization': 'Bearer token'}) 267 | self.assertEqual(configuration.host, VALUE_CLI) 268 | self.assertFalse(configuration.debug) 269 | 270 | with open(configuration.ssl_ca_cert) as f: 271 | self.assertEqual(f.read(), 'CA\n') 272 | 273 | evaluator = PriorityEvaluator({ 274 | KEY_K8S_MASTER_URI: VALUE_CLI, 275 | KEY_K8S_CA_BASE64: VALUE_CA, 276 | KEY_K8S_TOKEN: VALUE_TOKEN, 277 | }, {KEY_K8S_HANDLE_DEBUG: 'true'}, {}) 278 | configuration = evaluator.k8s_client_configuration() 279 | self.assertTrue(configuration.debug) 280 | 281 | def test_environment_deprecated(self): 282 | evaluator = PriorityEvaluator({}, {}, {KEY_K8S_CA_BASE64_URI_ENV_DEPRECATED: VALUE_ENV_DEPRECATED}) 283 | self.assertTrue(evaluator.environment_deprecated()) 284 | evaluator = PriorityEvaluator({}, {}, {KEY_K8S_MASTER_URI_ENV_DEPRECATED: VALUE_ENV_DEPRECATED}) 285 | self.assertTrue(evaluator.environment_deprecated()) 286 | evaluator = PriorityEvaluator({}, {}, {KEY_K8S_CA_BASE64_URI_ENV_DEPRECATED: ''}) 287 | self.assertFalse(evaluator.environment_deprecated()) 288 | evaluator = PriorityEvaluator({}, {}, {KEY_K8S_MASTER_URI_ENV_DEPRECATED: ''}) 289 | self.assertFalse(evaluator.environment_deprecated()) 290 | -------------------------------------------------------------------------------- /tests/test_dictionary.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from k8s_handle import dictionary 4 | 5 | 6 | class TestDictionaryMerge(unittest.TestCase): 7 | def test_dictionary_merge(self): 8 | dictionary_x = { 9 | "0": "", 10 | 1: [0, 1, 2], 11 | 2: {"inflated_key_0": "inflated_value_0"}, 12 | 3: {0, 1} 13 | } 14 | 15 | dictionary_y = { 16 | "0": "override_0", 17 | 1: ["value_0", "value_1", "value_2"], 18 | 2: {"inflated_key_1": "inflated_value_1"}, 19 | 3: "override_1", 20 | } 21 | 22 | assert dictionary.merge(dictionary_x, dictionary_y) == { 23 | "0": "override_0", 24 | 1: ["value_0", "value_1", "value_2"], 25 | 2: 26 | { 27 | "inflated_key_0": "inflated_value_0", 28 | "inflated_key_1": "inflated_value_1" 29 | }, 30 | 3: 
"override_1" 31 | } 32 | -------------------------------------------------------------------------------- /tests/test_handlers.py: -------------------------------------------------------------------------------- 1 | import os 2 | import unittest 3 | from unittest.mock import patch 4 | 5 | from k8s_handle import settings 6 | from k8s_handle import handler_deploy 7 | from kubernetes import client 8 | 9 | 10 | class TestDeployHandler(unittest.TestCase): 11 | 12 | def setUp(self): 13 | settings.CONFIG_FILE = 'tests/fixtures/config_with_env_vars.yaml' 14 | settings.TEMPLATES_DIR = 'templates/tests' 15 | os.environ['K8S_CONFIG_DIR'] = '/tmp/kube/' 16 | os.environ['SECTION1'] = 'not found' 17 | os.environ['SECTION'] = 'section-1' 18 | 19 | @patch('k8s_handle.templating.Renderer._generate_file') 20 | @patch('kubernetes.client.api.version_api.VersionApi.get_code_with_http_info') 21 | @patch('k8s_handle.k8s.provisioner.Provisioner.run') 22 | def test_api_exception_handling( 23 | self, 24 | mocked_provisioner_run, 25 | mocked_client_version_api_get_code, 26 | mocked_generate_file 27 | ): 28 | mocked_client_version_api_get_code.side_effect = client.exceptions.ApiException( 29 | 'Max retries exceeded with url: /version/' 30 | ) 31 | 32 | configs = { 33 | 'section': os.environ['SECTION'], 34 | 'config': settings.CONFIG_FILE, 35 | } 36 | # client.exceptions.ApiException should be handled 37 | handler_deploy(configs) 38 | -------------------------------------------------------------------------------- /tests/test_templating.py: -------------------------------------------------------------------------------- 1 | import os 2 | import yaml 3 | import shutil 4 | import unittest 5 | from k8s_handle import settings 6 | from k8s_handle import config 7 | from k8s_handle import templating 8 | from k8s_handle.templating import TemplateRenderingError 9 | 10 | 11 | class TestTemplating(unittest.TestCase): 12 | def setUp(self): 13 | settings.CONFIG_FILE = 'tests/fixtures/config.yaml' 14 | settings.TEMPLATES_DIR = 'templates/tests' 15 | os.environ['CUSTOM_ENV'] = 'My value' 16 | os.environ['K8S_CONFIG_DIR'] = '/tmp/kube/' 17 | 18 | def tearDown(self): 19 | if os.path.exists(settings.TEMP_DIR): 20 | shutil.rmtree(settings.TEMP_DIR) 21 | 22 | os.environ.pop('CUSTOM_ENV') 23 | os.environ.pop('K8S_CONFIG_DIR') 24 | 25 | def test_renderer_init(self): 26 | r = templating.Renderer('/tmp/test') 27 | self.assertEqual(r._templates_dir, '/tmp/test') 28 | 29 | def test_none_context(self): 30 | r = templating.Renderer('templates') 31 | with self.assertRaises(RuntimeError) as context: 32 | r.generate_by_context(None) 33 | self.assertTrue('Can\'t generate templates from None context' in str(context.exception), str(context.exception)) 34 | 35 | def test_generate_templates(self): 36 | r = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests')) 37 | context = config.load_context_section('test_dirs') 38 | r.generate_by_context(context) 39 | file_path_1 = '{}/template1.yaml'.format(settings.TEMP_DIR) 40 | file_path_2 = '{}/template2.yaml'.format(settings.TEMP_DIR) 41 | file_path_3 = '{}/template3.yaml'.format(settings.TEMP_DIR) 42 | file_path_4 = '{}/innerdir/template1.yaml'.format(settings.TEMP_DIR) 43 | file_path_5 = '{}/template_include_file.yaml'.format(settings.TEMP_DIR) 44 | file_path_6 = '{}/template_list_files.yaml'.format(settings.TEMP_DIR) 45 | self.assertTrue(os.path.exists(file_path_1)) 46 | self.assertTrue(os.path.exists(file_path_2)) 47 | self.assertTrue(os.path.exists(file_path_3)) 48 | with 
open(file_path_1, 'r') as f: 49 | content = f.read() 50 | self.assertEqual(content, "{'ha_ha': 'included_var'}") 51 | with open(file_path_2, 'r') as f: 52 | content = f.read() 53 | self.assertEqual(content, 'TXkgdmFsdWU=') 54 | with open(file_path_3, 'r') as f: 55 | content = f.read() 56 | self.assertEqual(content, 'My value') 57 | with open(file_path_4, 'r') as f: 58 | content = f.read() 59 | self.assertEqual(content, "{'ha_ha': 'included_var'}") 60 | with open(file_path_5, 'r') as f: 61 | content = f.read() 62 | self.assertEqual(content, "test: |\n {{ hello world }}\n new\n line\n {{ hello world1 }}\n") 63 | with open(file_path_6, 'r') as f: 64 | content = f.read() 65 | self.assertEqual(content, "test: |\n template1.yaml.j2:\n my_file.txt:\n my_file1.txt:\n ") 66 | 67 | def test_no_templates_in_kubectl(self): 68 | r = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests')) 69 | with self.assertRaises(RuntimeError) as context: 70 | r.generate_by_context(config.load_context_section('no_templates')) 71 | self.assertTrue('Templates section doesn\'t have any template items' in str(context.exception)) 72 | 73 | def test_render_not_existent_template(self): 74 | r = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests')) 75 | with self.assertRaises(TemplateRenderingError) as context: 76 | r.generate_by_context(config.load_context_section('not_existent_template')) 77 | self.assertTrue('doesnotexist.yaml.j2' in str(context.exception), context.exception) 78 | 79 | def test_generate_templates_with_kubectl_section(self): 80 | r = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests')) 81 | context = config.load_context_section('section_with_kubectl') 82 | r.generate_by_context(context) 83 | file_path_1 = '{}/template1.yaml'.format(settings.TEMP_DIR) 84 | file_path_2 = '{}/template2.yaml'.format(settings.TEMP_DIR) 85 | file_path_3 = '{}/template3.yaml'.format(settings.TEMP_DIR) 86 | file_path_4 = '{}/innerdir/template1.yaml'.format(settings.TEMP_DIR) 87 | self.assertTrue(os.path.exists(file_path_1)) 88 | self.assertTrue(os.path.exists(file_path_2)) 89 | self.assertTrue(os.path.exists(file_path_3)) 90 | with open(file_path_1, 'r') as f: 91 | content = f.read() 92 | self.assertEqual(content, "{'ha_ha': 'included_var'}") 93 | with open(file_path_2, 'r') as f: 94 | content = f.read() 95 | self.assertEqual(content, 'TXkgdmFsdWU=') 96 | with open(file_path_3, 'r') as f: 97 | content = f.read() 98 | self.assertEqual(content, 'My value') 99 | with open(file_path_4, 'r') as f: 100 | content = f.read() 101 | self.assertEqual(content, "{'ha_ha': 'included_var'}") 102 | 103 | def test_io_2709(self): 104 | r = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests')) 105 | with self.assertRaises(TemplateRenderingError) as context: 106 | c = config.load_context_section('io_2709') 107 | r.generate_by_context(c) 108 | self.assertTrue('due to: \'undefined_variable\' is undefined' in str(context.exception)) 109 | 110 | def test_evaluate_tags(self): 111 | r = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests')) 112 | tags = {'tag1', 'tag2', 'tag3'} 113 | self.assertTrue(r._evaluate_tags(tags, only_tags=['tag1'], skip_tags=None)) 114 | self.assertFalse(r._evaluate_tags(tags, only_tags=['tag4'], skip_tags=None)) 115 | self.assertFalse(r._evaluate_tags(tags, only_tags=['tag1'], skip_tags=['tag1'])) 116 | self.assertFalse(r._evaluate_tags(tags, only_tags=None, skip_tags=['tag1'])) 117 | 
self.assertTrue(r._evaluate_tags(tags, only_tags=None, skip_tags=['tag4'])) 118 | tags = set() 119 | self.assertFalse(r._evaluate_tags(tags, only_tags=['tag4'], skip_tags=None)) 120 | self.assertTrue(r._evaluate_tags(tags, only_tags=None, skip_tags=['tag4'])) 121 | 122 | def test_get_template_tags(self): 123 | r = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests')) 124 | template_1 = {'template': 'template.yaml.j2', 'tags': ['tag1', 'tag2', 'tag3']} 125 | template_2 = {'template': 'template.yaml.j2', 'tags': 'tag1,tag2,tag3'} 126 | template_3 = {'template': 'template.yaml.j2', 'tags': ['tag1']} 127 | template_4 = {'template': 'template.yaml.j2', 'tags': 'tag1'} 128 | self.assertEqual(r._get_template_tags(template_1), {'tag1', 'tag2', 'tag3'}) 129 | self.assertEqual(r._get_template_tags(template_2), {'tag1', 'tag2', 'tag3'}) 130 | self.assertEqual(r._get_template_tags(template_3), {'tag1'}) 131 | self.assertEqual(r._get_template_tags(template_4), {'tag1'}) 132 | 133 | def test_get_template_tags_unexpected_type(self): 134 | r = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests')) 135 | template = {'template': 'template.yaml.j2', 'tags': {'tag': 'unexpected'}} 136 | with self.assertRaises(TypeError) as context: 137 | r._get_template_tags(template) 138 | self.assertTrue('unexpected type' in str(context.exception)) 139 | 140 | def test_generate_group_templates(self): 141 | r = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests')) 142 | context = config.load_context_section('test_groups') 143 | r.generate_by_context(context) 144 | file_path_1 = '{}/template1.yaml'.format(settings.TEMP_DIR) 145 | file_path_2 = '{}/template2.yaml'.format(settings.TEMP_DIR) 146 | file_path_3 = '{}/template3.yaml'.format(settings.TEMP_DIR) 147 | self.assertTrue(os.path.exists(file_path_1)) 148 | self.assertTrue(os.path.exists(file_path_2)) 149 | self.assertTrue(os.path.exists(file_path_3)) 150 | 151 | def test_templates_regex(self): 152 | r = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests')) 153 | context = config.load_context_section('templates_regex') 154 | file_path_1 = '{}/template1.yaml'.format(settings.TEMP_DIR) 155 | file_path_2 = '{}/template2.yaml'.format(settings.TEMP_DIR) 156 | file_path_3 = '{}/template3.yaml'.format(settings.TEMP_DIR) 157 | file_path_4 = '{}/template4.yaml'.format(settings.TEMP_DIR) 158 | file_path_5 = '{}/innerdir/template1.yaml'.format(settings.TEMP_DIR) 159 | file_path_6 = '{}/template_include_file.yaml'.format(settings.TEMP_DIR) 160 | r.generate_by_context(context) 161 | self.assertTrue(os.path.exists(file_path_1)) 162 | self.assertFalse(os.path.exists(file_path_2)) 163 | self.assertFalse(os.path.exists(file_path_3)) 164 | self.assertFalse(os.path.exists(file_path_4)) 165 | self.assertTrue(os.path.exists(file_path_5)) 166 | self.assertFalse(os.path.exists(file_path_6)) 167 | 168 | def test_templates_regex_parse_failed(self): 169 | r = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests')) 170 | c = config.load_context_section('templates_regex_invalid') 171 | with self.assertRaises(TemplateRenderingError) as context: 172 | r.generate_by_context(c) 173 | self.assertTrue('Processing [: template [ hasn\'t been found' in str(context.exception)) 174 | 175 | def test_filters(self): 176 | r = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests')) 177 | context = config.load_context_section('test_filters') 178 | 
r.generate_by_context(context) 179 | result = '{}/filters.yaml'.format(settings.TEMP_DIR) 180 | with open(result, 'r') as f: 181 | actual = yaml.safe_load(f) 182 | self.assertEqual('aGVsbG8gd29ybGQ=', actual.get('b64encode')) 183 | self.assertEqual('k8s-handle', actual.get('b64decode')) 184 | self.assertEqual('8fae6dd899aace000fd494fd6d795e26e2c85bf8e59d4262ef56b03dc91e924c', actual.get('sha256')) 185 | affinity = [ 186 | {'effect': 'NoSchedule', 'key': 'dedicated', 'operator': 'Equal', 'value': 'monitoring'}, 187 | {'effect': 'NoSchedule', 'key': 'dedicated', 'operator': 'Equal', 'value': {'hello': 'world'}} 188 | ] 189 | self.assertEqual(affinity, actual.get('affinity')) 190 | 191 | def test_dashes(self): 192 | r = templating.Renderer(os.path.join(os.path.dirname(__file__), 'templates_tests')) 193 | context = config.load_context_section('test_dashes') 194 | r.generate_by_context(context) 195 | result = '{}/template-dashes.yaml'.format(settings.TEMP_DIR) 196 | with open(result, 'r') as f: 197 | actual = yaml.safe_load(f) 198 | self.assertEqual('do this', actual) 199 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | skipsdist = True 3 | envlist = py311 4 | 5 | [testenv] 6 | deps = -r {toxinidir}/requirements.txt 7 | flake8 8 | coveralls 9 | 10 | setenv = LOG_LEVEL=DEBUG 11 | 12 | commands = flake8 13 | coverage erase 14 | coverage run --source=. --rcfile=.coveragerc -m unittest 15 | coverage report 16 | coverage html 17 | 18 | [flake8] 19 | show-source = true 20 | exclude = .tox/* 21 | max-line-length = 120 22 | --------------------------------------------------------------------------------
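
Note on the template fixtures and test_templating.py above: they exercise custom Jinja2 filters (`b64encode`, `b64decode`, `sha256`) and globals (`include_file`, `list_files`) that are defined in `k8s_handle/templating.py`, which is not reproduced in this listing. The following is only a rough sketch, under the assumption of equivalent behaviour, of how such filters and globals could be registered on a Jinja2 environment; the names mirror the fixtures, but the real implementation may differ.

# Sketch (assumption): filters/globals equivalent to what the fixture templates call.
# Not the actual k8s_handle.templating implementation.
import base64
import glob
import hashlib
import os

from jinja2 import Environment, FileSystemLoader


def b64encode(value):
    # `{{ my_env_var | b64encode }}` in template2.yaml.j2; 'My value' -> 'TXkgdmFsdWU='
    return base64.b64encode(str(value).encode()).decode()


def b64decode(value):
    # `{{ 'TXkgdmFsdWU=' | b64decode }}` in template3.yaml.j2 renders 'My value'
    return base64.b64decode(value).decode()


def sha256(value):
    # hex digest of the input, as checked by test_filters
    return hashlib.sha256(str(value).encode()).hexdigest()


def list_files(pattern):
    # accepts either a directory or a glob, as template_list_files.yaml.j2 uses both
    if os.path.isdir(pattern):
        pattern = os.path.join(pattern, '*')
    return sorted(path for path in glob.glob(pattern) if os.path.isfile(path))


def include_file(pattern):
    # concatenate every file matched by the glob, as template_include_file.yaml.j2 expects
    contents = []
    for path in list_files(pattern):
        with open(path) as f:
            contents.append(f.read())
    return '\n'.join(contents)


def make_environment(templates_dir):
    env = Environment(loader=FileSystemLoader(templates_dir))
    env.filters.update({'b64encode': b64encode, 'b64decode': b64decode, 'sha256': sha256})
    env.globals.update({'include_file': include_file, 'list_files': list_files})
    return env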
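
The TestPriorityEvaluation cases in tests/test_config.py pin down a simple precedence rule without showing the implementation. A minimal sketch consistent with those assertions is given below; the helper names and the 'k8s_namespace'/'K8S_NAMESPACE' keys are placeholders, not the actual constants from k8s_handle.config.

# Sketch (assumption) of the precedence TestPriorityEvaluation asserts:
# falsy values never win, and candidates are consulted in priority order.
def first(*values):
    """Return the first truthy value, or None when every candidate is falsy."""
    for value in values:
        if value:
            return value
    return None


assert first(None, False, 0, '', {}, []) is None
assert first('value_cli', 'value_env') == 'value_cli'
assert first('', 'value_env') == 'value_env'


def namespace_default(context, env, kubeconfig_namespace=None):
    # Mirrors test_k8s_namespace_default: the context section wins, then a namespace
    # taken from kubeconfig, and only then the environment variable.
    return first(context.get('k8s_namespace'), kubeconfig_namespace, env.get('K8S_NAMESPACE'))


assert namespace_default({'k8s_namespace': 'ctx'}, {'K8S_NAMESPACE': 'env'}, 'kubeconfig') == 'ctx'
assert namespace_default({}, {'K8S_NAMESPACE': 'env'}, 'kubeconfig') == 'kubeconfig'
assert namespace_default({}, {'K8S_NAMESPACE': 'env'}) == 'env'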
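
Similarly, tests/test_dictionary.py fixes the merge semantics of k8s_handle.dictionary without the module itself appearing in this listing. As a hedged illustration only, a merge with the behaviour that test asserts could look like the sketch below.

# Sketch (assumption): nested dicts are merged key-by-key, any other type in the
# overriding dict (lists, strings, sets) simply replaces the base value.
def merge(base, override):
    result = dict(base)
    for key, value in override.items():
        if isinstance(result.get(key), dict) and isinstance(value, dict):
            result[key] = merge(result[key], value)
        else:
            result[key] = value
    return result


assert merge({'a': {'x': 1}}, {'a': {'y': 2}, 'b': 3}) == {'a': {'x': 1, 'y': 2}, 'b': 3}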