├── .coveragerc ├── .github └── workflows │ └── python-package.yml ├── .gitignore ├── .readthedocs.yaml ├── LICENSE ├── README.md ├── docs ├── async-usage.md ├── codecs.md ├── configuration.md ├── css │ └── custom.css ├── custom-resources.md ├── generic-resources.md ├── index.md ├── list-watch.md ├── reference │ ├── async_client.md │ ├── client.md │ ├── configuration.md │ ├── exceptions.md │ └── types.md ├── requirements.txt ├── resources-and-models.md ├── selectors.md └── utils.md ├── e2e-tests ├── test-crd.yaml └── test_client.py ├── lightkube ├── __init__.py ├── codecs.py ├── config │ ├── __init__.py │ ├── client_adapter.py │ ├── kubeconfig.py │ └── models.py ├── core │ ├── __init__.py │ ├── async_client.py │ ├── client.py │ ├── dataclasses_dict.py │ ├── exceptions.py │ ├── generic_client.py │ ├── internal_models.py │ ├── internal_resources.py │ ├── resource.py │ ├── resource_registry.py │ ├── schema.py │ ├── selector.py │ ├── sort_objects.py │ └── typing_extra.py ├── exceptions.py ├── generic_resource.py ├── operators.py ├── py.typed ├── types.py └── utils │ ├── __init__.py │ └── quantity.py ├── mkdocs.yml ├── pytest.ini ├── requirements.txt ├── setup-test-env.sh ├── setup.py └── tests ├── __init__.py ├── data ├── auth_script.sh ├── clientkey.pem ├── clientreq.pem ├── example-def-null.yaml ├── example-def-with-lists.yaml ├── example-def-with-nulls.yaml ├── example-def.tmpl ├── example-def.yaml └── example-multi-version-crd.yaml ├── test_async_client.py ├── test_client.py ├── test_client_adapter.py ├── test_codecs.py ├── test_config.py ├── test_config.yaml ├── test_config_default_user.yaml ├── test_config_exec.yaml ├── test_config_user_password.yaml ├── test_dataclasses_dict.py ├── test_generic_resource.py ├── test_operators.py ├── test_quantity.py ├── test_resource_registry.py ├── test_selector.py └── test_sort_objects.py /.coveragerc: -------------------------------------------------------------------------------- 1 | [report] 2 | exclude_lines = 3 | pragma: 
not covered 4 | @overload 5 | -------------------------------------------------------------------------------- /.github/workflows/python-package.yml: -------------------------------------------------------------------------------- 1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions 2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions 3 | 4 | name: Python package 5 | 6 | on: 7 | push: 8 | branches: [ master ] 9 | pull_request: 10 | branches: [ master ] 11 | 12 | jobs: 13 | build: 14 | runs-on: ubuntu-24.04 # ubuntu-latest 15 | strategy: 16 | matrix: 17 | python-version: ['3.8', '3.9', '3.10', '3.11', '3.12', '3.13'] 18 | 19 | steps: 20 | - uses: actions/checkout@v2 21 | - name: Set up Python ${{ matrix.python-version }} 22 | uses: actions/setup-python@v2 23 | with: 24 | python-version: ${{ matrix.python-version }} 25 | - name: Install dependencies 26 | run: | 27 | python -m pip install --upgrade pip 28 | pip install flake8 pytest coveralls wheel pytest-asyncio jinja2 respx asyncmock setuptools 29 | pip install -e . 30 | ./setup-test-env.sh 31 | - name: Lint with flake8 32 | run: | 33 | # stop the build if there are Python syntax errors or undefined names 34 | flake8 lightkube/ --count --select=E9,F63,F7,F82 --show-source --statistics 35 | # exit-zero treats all errors as warnings. 
The GitHub editor is 127 chars wide 36 | flake8 lightkube/ --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics 37 | - name: Test with pytest 38 | run: | 39 | coverage run --source=lightkube -m pytest tests/ 40 | GITHUB_TOKEN=${{ secrets.GITHUB_TOKEN }} coveralls --service=github 41 | - name: Package (bdist_wheel) 42 | run: | 43 | python setup.py bdist_wheel 44 | - name: Package (sdist) 45 | run: | 46 | python setup.py sdist 47 | - name: Archive artifact 48 | uses: actions/upload-artifact@v4 49 | with: 50 | name: dist-${{ matrix.python-version }} 51 | path: | 52 | dist 53 | e2e-test: 54 | strategy: 55 | fail-fast: false 56 | matrix: 57 | k8s: [ '1.26', '1.30' ] 58 | name: E2E test in K8s ${{ matrix.k8s }} 59 | runs-on: ubuntu-24.04 60 | steps: 61 | - uses: actions/checkout@v2 62 | - name: Set up Python 63 | uses: actions/setup-python@v2 64 | with: 65 | python-version: '3.10' 66 | - name: Install dependencies 67 | run: | 68 | python -m pip install --upgrade pip 69 | pip install pytest pytest-asyncio jinja2 wheel 70 | pip install . 
"lightkube-models==${{ matrix.k8s }}.*" 71 | - uses: nolar/setup-k3d-k3s@v1 72 | with: 73 | version: v${{ matrix.k8s }} 74 | - run: kubectl version 75 | - run: pytest e2e-tests/ 76 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | _env* 2 | lightkube/models 3 | lightkube/resources 4 | __pycache__ 5 | .idea 6 | _build/ 7 | build/ 8 | dist/ 9 | *.egg-info 10 | site/ 11 | htmlcov/ 12 | .vscode 13 | .devcontainer 14 | 15 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | build: 4 | os: ubuntu-22.04 5 | tools: 6 | python: "3.11" 7 | 8 | mkdocs: 9 | configuration: mkdocs.yml 10 | fail_on_warning: false 11 | 12 | python: 13 | install: 14 | - requirements: requirements.txt 15 | - requirements: docs/requirements.txt 16 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Giuseppe Tribulato 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # lightkube 2 | 3 | ![](https://img.shields.io/github/actions/workflow/status/gtsystem/lightkube/python-package.yml?branch=master) 4 | [![Coverage Status](https://coveralls.io/repos/github/gtsystem/lightkube/badge.svg?branch=master)](https://coveralls.io/github/gtsystem/lightkube?branch=master) 5 | [![pypi supported versions](https://img.shields.io/pypi/pyversions/lightkube.svg)](https://pypi.python.org/pypi/lightkube) 6 | 7 | Modern lightweight kubernetes module for python 8 | 9 | 10 | ## Highlights 11 | 12 | * *Simple* interface shared across all kubernetes APIs. 13 | * Extensive *type hints* to avoid common mistakes and to support autocompletion. 14 | * Models and resources generated from the swagger specifications using standard dataclasses. 15 | * Load/Dump resource objects from YAML. 16 | * Support for async/await 17 | * Support for installing a specific version of the kubernetes models (1.18 to 1.33) 18 | * Lazy instantiation of inner models. 19 | * Fast startup and small memory footprint as only needed models and resources can be imported. 20 | * Automatic handling of pagination when listing resources. 21 | 22 | This module is powered by [httpx](https://github.com/encode/httpx/tree/master/httpx). 
23 | 24 | ## Installation 25 | 26 | This module requires python >= 3.8 27 | 28 | pip install lightkube 29 | 30 | ## Usage 31 | 32 | Read a pod 33 | 34 | ```python 35 | from lightkube import Client 36 | from lightkube.resources.core_v1 import Pod 37 | 38 | client = Client() 39 | pod = client.get(Pod, name="my-pod", namespace="default") 40 | print(pod.metadata.uid) 41 | ``` 42 | 43 | List nodes 44 | ```python 45 | from lightkube import Client 46 | from lightkube.resources.core_v1 import Node 47 | 48 | client = Client() 49 | for node in client.list(Node): 50 | print(node.metadata.name) 51 | ``` 52 | 53 | Watch deployments 54 | ```python 55 | from lightkube import Client 56 | from lightkube.resources.apps_v1 import Deployment 57 | 58 | client = Client() 59 | for op, dep in client.watch(Deployment, namespace="default"): 60 | print(f"{dep.metadata.name} {dep.spec.replicas}") 61 | ``` 62 | 63 | Create a config map 64 | ```python 65 | from lightkube.resources.core_v1 import ConfigMap 66 | from lightkube.models.meta_v1 import ObjectMeta 67 | 68 | config = ConfigMap( 69 | metadata=ObjectMeta(name='my-config', namespace='default'), 70 | data={'key1': 'value1', 'key2': 'value2'} 71 | ) 72 | 73 | client.create(config) 74 | ``` 75 | 76 | Replace the previous config with a different content 77 | ```python 78 | config.data['key1'] = 'new value' 79 | client.replace(config) 80 | ``` 81 | 82 | Patch an existing config adding a label 83 | ```python 84 | patch = {'metadata': {'labels': {'app': 'xyz'}}} 85 | client.patch(ConfigMap, name='my-config', namespace='default', obj=patch) 86 | ``` 87 | 88 | Remove the label `app` 89 | ```python 90 | # When using PatchType.STRATEGIC (default), setting a value of a key/value to None, will remove the current item 91 | patch = {'metadata': {'labels': {'app': None}}} 92 | client.patch(ConfigMap, name='my-config', namespace='default', obj=patch) 93 | ``` 94 | 95 | Delete a namespaced resource 96 | ```python 97 | client.delete(ConfigMap,
name='my-config', namespace='default') 98 | ``` 99 | 100 | Create resources defined in a file 101 | ```python 102 | from lightkube import Client, codecs 103 | 104 | client = Client() 105 | with open('deployment.yaml') as f: 106 | for obj in codecs.load_all_yaml(f): 107 | client.create(obj) 108 | ``` 109 | 110 | Scale a deployment 111 | ```python 112 | from lightkube.resources.apps_v1 import Deployment 113 | from lightkube.models.meta_v1 import ObjectMeta 114 | from lightkube.models.autoscaling_v1 import ScaleSpec 115 | 116 | obj = Deployment.Scale( 117 | metadata=ObjectMeta(name='metrics-server', namespace='kube-system'), 118 | spec=ScaleSpec(replicas=1) 119 | ) 120 | client.replace(obj) 121 | ``` 122 | 123 | Update Status of a deployment 124 | ```python 125 | from lightkube.resources.apps_v1 import Deployment 126 | from lightkube.models.apps_v1 import DeploymentStatus 127 | 128 | obj = Deployment.Status( 129 | status=DeploymentStatus(observedGeneration=99) 130 | ) 131 | client.apply(obj, name='metrics-server', namespace='kube-system') 132 | ``` 133 | 134 | 135 | Create and modify resources using [server side apply](https://kubernetes.io/docs/reference/using-api/server-side-apply/) 136 | 137 | *Note:* `field_manager` is required for server-side apply. You can specify it once in the client constructor 138 | or when calling `apply()`. Also `apiVersion` and `kind` need to be provided as part of 139 | the object definition. 
140 | 141 | ```python 142 | from lightkube.resources.core_v1 import ConfigMap 143 | from lightkube.models.meta_v1 import ObjectMeta 144 | 145 | client = Client(field_manager="my-manager") 146 | config = ConfigMap( 147 | # note apiVersion and kind need to be specified for server-side apply 148 | apiVersion='v1', kind='ConfigMap', 149 | metadata=ObjectMeta(name='my-config', namespace='default'), 150 | data={'key1': 'value1', 'key2': 'value2'} 151 | ) 152 | 153 | res = client.apply(config) 154 | print(res.data) 155 | # prints {'key1': 'value1', 'key2': 'value2'} 156 | 157 | del config.data['key1'] 158 | config.data['key3'] = 'value3' 159 | 160 | res = client.apply(config) 161 | print(res.data) 162 | # prints {'key2': 'value2', 'key3': 'value3'} 163 | ``` 164 | 165 | Stream pod logs 166 | ```python 167 | from lightkube import Client 168 | 169 | client = Client() 170 | for line in client.log('my-pod', follow=True): 171 | print(line) 172 | ``` 173 | 174 | ## Unsupported features 175 | 176 | The following features are not supported at the moment: 177 | 178 | * Special subresources `attach`, `exec`, `portforward` and `proxy`. 179 | * `auth-provider` authentication method is not supported. The supported 180 | authentication methods are `token`, `username` + `password` and `exec`. -------------------------------------------------------------------------------- /docs/async-usage.md: -------------------------------------------------------------------------------- 1 | # AsyncClient usage 2 | 3 | The AsyncClient allows you to perform the same operations that are possible using the Client but in 4 | an asynchronous way: 5 | 6 | * The operations `create`, `delete`, `deletecollection`, `patch`, `replace`, `get` return a coroutine and need to be used with `await ...`. 7 | * The operations `list` and `watch` return an asynchronous iterable and can be used with `async for ...`.
8 | 9 | ## Examples 10 | 11 | Read a pod 12 | 13 | ```python 14 | from lightkube import AsyncClient 15 | from lightkube.resources.core_v1 import Pod 16 | 17 | async def example(): 18 | client = AsyncClient() 19 | pod = await client.get(Pod, name="my-pod", namespace="default") 20 | print(pod.metadata.uid) 21 | ``` 22 | 23 | List nodes 24 | ```python 25 | from lightkube import AsyncClient 26 | from lightkube.resources.core_v1 import Node 27 | 28 | async def example(): 29 | client = AsyncClient() 30 | async for node in client.list(Node): 31 | print(node.metadata.name) 32 | ``` 33 | 34 | Watch deployments 35 | ```python 36 | from lightkube import AsyncClient 37 | from lightkube.resources.apps_v1 import Deployment 38 | 39 | async def example(): 40 | client = AsyncClient() 41 | async for op, dep in client.watch(Deployment, namespace="default"): 42 | print(f"{dep.metadata.name} {dep.spec.replicas}") 43 | ``` 44 | 45 | Create a config map 46 | ```python 47 | from lightkube import AsyncClient 48 | from lightkube.resources.core_v1 import ConfigMap 49 | from lightkube.models.meta_v1 import ObjectMeta 50 | 51 | async def example(): 52 | client = AsyncClient() 53 | config = ConfigMap( 54 | metadata=ObjectMeta(name='my-config', namespace='default'), 55 | data={'key1': 'value1', 'key2': 'value2'} 56 | ) 57 | 58 | await client.create(config) 59 | ``` 60 | 61 | Replace the previous config with a different content 62 | ```python 63 | config.data['key1'] = 'new value' 64 | await client.replace(config) 65 | ``` 66 | 67 | Patch an existing config 68 | ```python 69 | patch = {'metadata': {'labels': {'app': 'xyz'}}} 70 | await client.patch(ConfigMap, name='my-config', namespace='default', obj=patch) 71 | ``` 72 | 73 | Delete a namespaced resource 74 | ```python 75 | await client.delete(ConfigMap, name='my-config', namespace='default') 76 | ``` 77 | 78 | Scale a deployment 79 | ```python 80 | from lightkube import AsyncClient 81 | from lightkube.resources.apps_v1 import Deployment 82 |
from lightkube.models.meta_v1 import ObjectMeta 83 | from lightkube.models.autoscaling_v1 import ScaleSpec 84 | 85 | async def example(): 86 | client = AsyncClient() 87 | obj = Deployment.Scale( 88 | metadata=ObjectMeta(name='metrics-server', namespace='kube-system'), 89 | spec=ScaleSpec(replicas=1) 90 | ) 91 | await client.replace(obj, 'metrics-server', namespace='kube-system') 92 | ``` 93 | 94 | Update Status of a deployment 95 | ```python 96 | from lightkube import AsyncClient 97 | from lightkube.resources.apps_v1 import Deployment 98 | from lightkube.models.apps_v1 import DeploymentStatus 99 | 100 | async def example(): 101 | client = AsyncClient() 102 | obj = Deployment.Status( 103 | status=DeploymentStatus(observedGeneration=99) 104 | ) 105 | await client.apply(obj, name='metrics-server', namespace='kube-system') 106 | ``` 107 | 108 | 109 | Stream pod logs 110 | ```python 111 | from lightkube import AsyncClient 112 | 113 | async def example(): 114 | client = AsyncClient() 115 | async for line in client.log('my-pod', follow=True): 116 | print(line) 117 | ``` 118 | -------------------------------------------------------------------------------- /docs/codecs.md: -------------------------------------------------------------------------------- 1 | # Load/Dump kubernetes objects 2 | 3 | ## Convert models from/to dict 4 | 5 | All lightkube models allow to convert from/to dicts using the methods `.from_dict()` and 6 | `.to_dict()`. For example you can create an `ObjectMeta` with 7 | 8 | ```python 9 | from lightkube.models.meta_v1 import ObjectMeta 10 | meta = ObjectMeta.from_dict({'name': 'my-name', 'labels': {'key': 'value'}}) 11 | ``` 12 | 13 | and transform it back with 14 | 15 | ```python 16 | meta_dict = meta.to_dict() 17 | ``` 18 | 19 | Dict representations can then be serialized/deserialized in JSON or YAML. 
20 | 21 | ## Load resource objects 22 | 23 | It is possible to dynamically load a resource object using the function `lightkube.codecs.from_dict()` 24 | 25 | ```python 26 | from lightkube import codecs 27 | 28 | obj = codecs.from_dict({ 29 | 'apiVersion': 'v1', 30 | 'kind': 'ConfigMap', 31 | 'metadata': {'name': 'config-name', 'labels': {'label1': 'value1'}}, 32 | 'data': { 33 | 'file1.txt': 'some content here', 34 | 'file2.txt': 'some other content' 35 | } 36 | }) 37 | print(type(obj)) 38 | ``` 39 | 40 | Output: `<class 'lightkube.resources.core_v1.ConfigMap'>` 41 | 42 | !!! note 43 | Only known resources can be loaded. These are either kubernetes [standard resources](resources-and-models.md) 44 | or [generic resources](generic-resources.md) manually defined. You can register further resources using 45 | the [`resource_registry`](#resource-registry). 46 | 47 | ## Load from YAML 48 | 49 | Kubernetes resources defined in a YAML file can be easily loaded using the following function: 50 | 51 | ::: lightkube.codecs.load_all_yaml 52 | options: 53 | heading_level: 3 54 | 55 | ### Example 56 | 57 | ```python 58 | from lightkube import Client, codecs 59 | 60 | client = Client() 61 | with open('deployment.yaml') as f: 62 | for obj in codecs.load_all_yaml(f): 63 | client.create(obj) 64 | ``` 65 | 66 | !!! note 67 | Only defined resources can be loaded. These are either kubernetes [standard resources](resources-and-models.md) 68 | or [generic resources](generic-resources.md) manually defined. 69 | 70 | If we have a YAML file that both defines a CRD and loads an instance of it, we can use `create_resources_for_crds=True`, like: 71 | 72 | ```python 73 | from lightkube import Client, codecs 74 | 75 | client = Client() 76 | with open('file-with-crd-and-instance.yaml') as f: 77 | for obj in codecs.load_all_yaml(f, create_resources_for_crds=True): 78 | client.create(obj) 79 | ``` 80 | 81 | This results in a generic resource being created for any CustomResourceDefinition in the YAML file.
82 | 83 | It is also possible to create resources from a [jinja2](https://jinja.palletsprojects.com) template 84 | passing the parameter `context`. 85 | 86 | For example assuming `service.tmpl` has the following content: 87 | ```yaml 88 | apiVersion: v1 89 | kind: Service 90 | metadata: 91 | name: nginx 92 | labels: 93 | run: my-nginx 94 | env: {{env}} 95 | spec: 96 | type: NodePort 97 | ports: 98 | - port: 8080 99 | targetPort: 80 100 | protocol: TCP 101 | selector: 102 | run: my-nginx 103 | env: {{env}} 104 | ``` 105 | 106 | can be used as follow: 107 | ```python 108 | with open('service.tmpl') as f: 109 | # render the template using `context` and return the corresponding resource objects. 110 | objs = codecs.load_all_yaml(f, context={'env': 'prd'}) 111 | print(objs[0].metadata.labels['env']) # prints `prd` 112 | ``` 113 | 114 | ## Dump to YAML 115 | 116 | The function `lightkube.codecs.dump_all_yaml(...)` can be used to dump resource objects as YAML. 117 | 118 | ::: lightkube.codecs.dump_all_yaml 119 | options: 120 | heading_level: 3 121 | 122 | ### Example 123 | 124 | ```python 125 | from lightkube.resources.core_v1 import ConfigMap 126 | from lightkube.models.meta_v1 import ObjectMeta 127 | from lightkube import codecs 128 | 129 | cm = ConfigMap( 130 | apiVersion='v1', kind='ConfigMap', 131 | metadata=ObjectMeta(name='xyz', labels={'x': 'y'}) 132 | ) 133 | with open('deployment-out.yaml', 'w') as fw: 134 | codecs.dump_all_yaml([cm], fw) 135 | ``` 136 | 137 | ## Sorting resource objects 138 | 139 | Sometimes you have a manifest of resources where some depend on others. For example, 140 | consider, the following `yaml_with_dependencies.yaml` file: 141 | 142 | ```yaml 143 | kind: ClusterRoleBinding 144 | roleRef: 145 | kind: ClusterRole 146 | name: example-cluster-role-binding 147 | subjects: 148 | - kind: ServiceAccount 149 | name: example-service-account 150 | ... 151 | --- 152 | kind: ClusterRole 153 | metadata: 154 | name: example-cluster-role 155 | ... 
156 | --- 157 | kind: ServiceAccount 158 | metadata: 159 | name: example-service-account 160 | ``` 161 | 162 | where we have a `ClusterRoleBinding` that uses a `ClusterRole` and `ServiceAccount`. 163 | In cases like this, the order in which we `apply` these resources matters as the 164 | `ClusterRoleBinding` depends on the others. To sort these objects so that we do not 165 | encounter API errors when `apply`ing them, use `sort_objects(...)`. 166 | 167 | ::: lightkube.sort_objects 168 | options: 169 | heading_level: 3 170 | 171 | Revisiting the example above, we can apply from `yaml_with_dependencies.yaml` by: 172 | 173 | ```python 174 | from lightkube import Client, codecs, sort_objects 175 | 176 | client = Client() 177 | with open('yaml_with_dependencies.yaml') as f: 178 | objects = codecs.load_all_yaml(f) 179 | for obj in sort_objects(objects): 180 | client.create(obj) 181 | ``` 182 | 183 | `sort_objects` orders the objects in a way that is friendly to applying them as a 184 | batch, allowing us to loop through them as normal. 185 | 186 | Similarly, problems can arise when deleting a batch of objects. For example, 187 | consider the manifest `crs_and_crds.yaml`: 188 | 189 | ```yaml 190 | apiVersion: apiextensions.k8s.io/v1 191 | kind: CustomResourceDefinition 192 | ... 193 | spec: 194 | names: 195 | kind: SomeNewCr 196 | ... 197 | --- 198 | kind: SomeNewCr 199 | metadata: 200 | name: instance-of-new-cr 201 | ``` 202 | 203 | Deleting this in a loop like above would first delete the `CustomResourceDefinition`, 204 | resulting in all instances of `SomeNewCr` to be deleted implicitly. When we then 205 | attempted to delete `instance-of-new-cr`, we would encounter an API error. 
206 | Use `codecs.sort_objects(..., reverse=True)` to avoid this issue: 207 | 208 | ```python 209 | from lightkube import Client, codecs, sort_objects 210 | 211 | client = Client() 212 | with open('crs_and_crds.yaml') as f: 213 | objects = codecs.load_all_yaml(f) 214 | for obj in sort_objects(objects, reverse=True): 215 | client.delete(type(obj), obj.metadata.name) 216 | ``` 217 | 218 | This orders the objects in a way that is friendly for deleting them as a batch. 219 | 220 | ## Resource Registry 221 | 222 | ::: lightkube.codecs.resource_registry 223 | options: 224 | heading_level: 3 225 | 226 | The singleton `resource_registry` allows you to register a custom resource, so that it can be used by the load 227 | functions on this module: 228 | 229 | ```python 230 | from lightkube import codecs 231 | 232 | codecs.resource_registry.register(MyCustomResource) 233 | 234 | with open('service.yaml') as f: 235 | # Now `MyCustomResource` can be loaded 236 | objs = codecs.load_all_yaml(f) 237 | ``` 238 | 239 | `register` can also be used as a decorator: 240 | ```python 241 | from lightkube.core.resource import NamespacedResource 242 | from lightkube.codecs import resource_registry 243 | 244 | @resource_registry.register 245 | class MyCustomResource(NamespacedResource): 246 | ... 247 | ``` 248 | 249 | 250 | -------------------------------------------------------------------------------- /docs/configuration.md: -------------------------------------------------------------------------------- 1 | # Configuration 2 | 3 | Similar to other Kubernetes libraries and the `kubectl` CLI tool, 4 | lightkube utilizes the kubeconfig file to configure the connection 5 | with Kubernetes. 6 | 7 | The kubernetes configuration is represented by the class [lightkube.KubeConfig][]. 8 | 9 | ## Load the configuration from a file 10 | 11 | The constructor `KubeConfig.from_file()` is used to load a specific configuration from the filesystem (which needs to follow 12 | the standard YAML kubeconfig format).
13 | 14 | Example: 15 | 16 | ```python 17 | from lightkube import KubeConfig, Client 18 | 19 | config = KubeConfig.from_file("path/to/my/config") 20 | client = Client(config=config) 21 | ``` 22 | 23 | Notice that we didn't select a context. By default the client will pick the current context. This is in fact equivalent to 24 | 25 | ```python 26 | from lightkube import KubeConfig, Client 27 | 28 | config = KubeConfig.from_file("path/to/my/config") 29 | client = Client(config=config.get()) # pick the current context 30 | ``` 31 | 32 | The method `.get()` of KubeConfig is used to select a specific `cluster` and `user` configuration given a defined context. 33 | Without parameters the current context is assumed. A different context can be also used as follow 34 | 35 | ```python 36 | # use the context named my-context 37 | client = Client(config=config.get(context_name='my-context')) 38 | ``` 39 | 40 | ## Load in-cluster configuration 41 | 42 | The constructor `KubeConfig.from_service_account()` is used to build a configuration starting from the service account 43 | data exposed inside a pod running on the cluster: 44 | 45 | ```python 46 | from lightkube import KubeConfig, Client 47 | 48 | config = KubeConfig.from_service_account() 49 | client = Client(config=config) 50 | ``` 51 | 52 | ## Auto-detect configuration from the environment 53 | 54 | By default lightkube will do his best to detect the configuration looking 55 | at the environment. 56 | 57 | ```python 58 | import lightkube 59 | 60 | client = lightkube.Client() # no configuration provided 61 | ``` 62 | 63 | is equivalent to 64 | 65 | ```python 66 | from lightkube import KubeConfig, Client 67 | 68 | config = KubeConfig.from_env() 69 | client = Client(config=config) 70 | ``` 71 | 72 | `KubeConfig.from_env()` will attempt to load a configuration using the following order: 73 | 74 | * in-cluster config. 75 | * config file defined in `KUBECONFIG` environment variable. 
76 | * configuration file present on the default location (`~/.kube/config`). 77 | 78 | ## Proxy configuration 79 | 80 | The constructor `KubeConfig.from_server()` will build a simple configuration useful to connect to a non protected 81 | Kubernetes API. This is for example useful to tunnel API calls using kubectl proxy: 82 | 83 | ```bash 84 | kubectl proxy --port=8080 85 | ``` 86 | 87 | ```python 88 | from lightkube import KubeConfig, Client 89 | 90 | config = KubeConfig.from_server("http://localhost:8080") 91 | client = Client(config=config) 92 | ``` 93 | -------------------------------------------------------------------------------- /docs/css/custom.css: -------------------------------------------------------------------------------- 1 | div.autodoc-docstring { 2 | padding-left: 16px; 3 | border-left: 2px solid hsla(0, 0%, 96%, 1); 4 | } 5 | 6 | div.autodoc-docstring ul { 7 | margin-top: 0; 8 | } 9 | 10 | div.autodoc-docstring li { 11 | margin-bottom: 0; 12 | } 13 | 14 | div.autodoc-docstring p:nth-of-type(2) { 15 | margin-bottom: 5px; 16 | } 17 | -------------------------------------------------------------------------------- /docs/custom-resources.md: -------------------------------------------------------------------------------- 1 | # Custom Resources 2 | 3 | For simple custom resources, it might be easiest to use [Generic Resources](generic-resources.md). 4 | 5 | If you want to work with more complex custom resources, or you want the added type safety of fully defined types, you can define your own Custom Resources. 
6 | 7 | ## Defining your own Custom Resources 8 | 9 | First you must define the models that make up your Custom Resource: 10 | 11 | ```python 12 | from typing import Optional 13 | 14 | from lightkube.core.schema import DictMixin, dataclass 15 | from lightkube.models import meta_v1 16 | 17 | 18 | @dataclass 19 | class Owner(DictMixin): 20 | name: str 21 | 22 | 23 | @dataclass 24 | class DogSpec(DictMixin): 25 | breed: str 26 | owner: Owner 27 | 28 | 29 | @dataclass 30 | class DogStatus(DictMixin): 31 | conditions: Optional[list[meta_v1.Condition]] = None 32 | observedGeneration: Optional[int] = None 33 | 34 | 35 | @dataclass 36 | class Dog(DictMixin): 37 | apiVersion: Optional[str] = None 38 | kind: Optional[str] = None 39 | metadata: Optional[meta_v1.ObjectMeta] = None 40 | spec: Optional[DogSpec] = None 41 | status: Optional[DogStatus] = None 42 | ``` 43 | 44 | To be able to use these models as resources in the client, you must create the corresponding `Resource` subclasses: 45 | 46 | ```python 47 | from typing import ClassVar 48 | 49 | from lightkube.codecs import resource_registry 50 | from lightkube.core import resource as res 51 | 52 | from ..models import dog as m_dog 53 | 54 | 55 | # Only needed if your custom resource has a status subresource 56 | class DogStatus(res.NamespacedSubResource, m_dog.Dog): 57 | _api_info = res.ApiInfo( 58 | resource=res.ResourceDef('stable.example.com', 'v1', 'Dog'), 59 | parent=res.ResourceDef('stable.example.com', 'v1', 'Dog'), 60 | plural='dogs', 61 | verbs=['get', 'patch', 'put'], 62 | action='status', 63 | ) 64 | 65 | 66 | @resource_registry.register 67 | class Dog(res.NamespacedResourceG, m_dog.Dog): 68 | _api_info = res.ApiInfo( 69 | resource=res.ResourceDef('stable.example.com', 'v1', 'Dog'), 70 | plural='dogs', 71 | verbs=[ 72 | 'delete', 'deletecollection', 'get', 'global_list', 'global_watch', 73 | 'list', 'patch', 'post', 'put', 'watch' 74 | ], 75 | ) 76 | 77 | # Only needed if your custom resource has a status 
subresource 78 | Status: ClassVar = DogStatus 79 | ``` 80 | 81 | Once you have defined your custom resource, you can use it with the `Client` as you would with any other resource. 82 | -------------------------------------------------------------------------------- /docs/generic-resources.md: -------------------------------------------------------------------------------- 1 | # Generic Resources 2 | 3 | Sometimes you may need to interact with resources installed in the cluster that are not 4 | provided by default with a kubernetes installation. 5 | You can still interact with such resources using a generic resource. 6 | 7 | ## Interface 8 | 9 | ::: lightkube.generic_resource.create_global_resource 10 | options: 11 | heading_level: 3 12 | 13 | ::: lightkube.generic_resource.create_namespaced_resource 14 | options: 15 | heading_level: 3 16 | 17 | ### Examples 18 | 19 | ```python 20 | from lightkube import Client 21 | from lightkube.generic_resource import create_namespaced_resource 22 | 23 | Job = create_namespaced_resource('stable.example.com', 'v1', 'Job', 'jobs') 24 | 25 | client = Client() 26 | job = client.get(Job, name="job1", namespace="my-namespace") 27 | ``` 28 | 29 | A generic resource is itself a subclass of `dict` so you can access the content as you would do 30 | with a dictionary: 31 | 32 | ```python 33 | print(job["path"]["to"]["something"]) 34 | ``` 35 | 36 | For conveniency, default resources attributes `apiVersion`, `metadata`, `kind` and `status` can be 37 | accessed using the attribute notation: 38 | 39 | ``` 40 | print(job.kind) 41 | print(job.metadata) 42 | ``` 43 | 44 | Specifically metadata is also decoded using the model ``models.meta_v1.ObjectMeta``: 45 | 46 | `print(job.metadata.name)` 47 | 48 | Since it's a dictionary you can create a resource manually as follow: 49 | 50 | ```python 51 | job = Job(metadata={"name": "job2", "namespace": "my-namespace"}, spec=...) 52 | client.create(job) 53 | ``` 54 | 55 | !!! 
note 56 | Since generic resources are schemaless, more attention needs to be given to what 57 | attributes are available or you will get an error from the server. 58 | 59 | Subresources `Status` and `Scale` are also defined: 60 | 61 | ```python 62 | job = client.get(Job.Status, name="job1", namespace="my-namespace") 63 | ``` 64 | 65 | !!! note 66 | Only some resources may support `Scale`. 67 | 68 | ## Convenience Functions for Generic Resources 69 | 70 | Some helper functions are also included to make using generic resources easier: 71 | 72 | ::: lightkube.generic_resource.get_generic_resource 73 | options: 74 | heading_level: 3 75 | 76 | ::: lightkube.generic_resource.load_in_cluster_generic_resources 77 | options: 78 | heading_level: 3 79 | 80 | ::: lightkube.generic_resource.async_load_in_cluster_generic_resources 81 | options: 82 | heading_level: 3 83 | 84 | ::: lightkube.generic_resource.create_resources_from_crd 85 | options: 86 | heading_level: 3 87 | 88 | `load_in_cluster_generic_resources` loads all CRDs in the cluster as generic resources, removing the need for explicitly defining each resource needed. This is especially helpful for scripting around YAML files that may use unknown custom resources. For example, using the [Kubernetes example of the CronTab CRD](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/): 89 | 90 | crontab.yaml: 91 | ```yaml 92 | apiVersion: "stable.example.com/v1" 93 | kind: CronTab 94 | metadata: 95 | name: my-new-cron-object 96 | spec: 97 | cronSpec: "* * * * */5" 98 | image: my-awesome-cron-image 99 | ``` 100 | 101 | ```python 102 | from pathlib import Path 103 | 104 | from lightkube import Client 105 | from lightkube.codecs import load_all_yaml 106 | from lightkube.generic_resource import load_in_cluster_generic_resources 107 | 108 | # This fails with error message: 109 | # lightkube.core.exceptions.LoadResourceError: No module named 'lightkube.resources.stable_example_com_v1'.
If using a CRD, ensure you define a generic resource. 110 | resources = load_all_yaml(Path("crontab.yaml").read_text()) 111 | 112 | client = Client() 113 | load_in_cluster_generic_resources(client) 114 | 115 | # Now we can load_all_yaml (and use those loaded resources, for example to create them in cluster) 116 | resources = load_all_yaml(Path("crontab.yaml").read_text()) 117 | ``` 118 | 119 | `create_resources_from_crd` creates generic resources for each version of a `CustomResourceDefinition` object. For example: 120 | 121 | ```python 122 | from lightkube.generic_resource import create_resources_from_crd 123 | from lightkube.resources.apiextensions_v1 import CustomResourceDefinition 124 | from lightkube.models.apiextensions_v1 import ( 125 | CustomResourceDefinitionNames, 126 | CustomResourceDefinitionSpec, 127 | CustomResourceDefinitionVersion, 128 | ) 129 | versions = ['v1alpha1', 'v1'] 130 | 131 | crd = CustomResourceDefinition( 132 | 133 | spec=CustomResourceDefinitionSpec( 134 | group='some.group', 135 | names=CustomResourceDefinitionNames( 136 | kind='somekind', 137 | plural='somekinds', 138 | ), 139 | scope='Namespaced', 140 | versions=[ 141 | CustomResourceDefinitionVersion( 142 | name=version, 143 | served=True, 144 | storage=True, 145 | ) for version in versions 146 | ], 147 | ) 148 | ) 149 | 150 | create_resources_from_crd(crd) # Creates two generic resources, one for each above version 151 | 152 | # To demonstrate this worked: 153 | from lightkube.generic_resource import _created_resources 154 | print("Dict of custom resources that have been defined in Lightkube:") 155 | print(_created_resources) 156 | ``` 157 | -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | {!README.md!} 2 | -------------------------------------------------------------------------------- /docs/list-watch.md:
-------------------------------------------------------------------------------- 1 | # List-Watch pattern 2 | 3 | As documented in [section "Efficient detection of 4 | changes"](https://kubernetes.io/docs/reference/using-api/api-concepts/#efficient-detection-of-changes) 5 | in kubernetes reference, we can use the `resourceVersion` from a list 6 | response in a subsequent watch request, to reliably receive changes 7 | since the list operation. In lightkube this information is available 8 | on the object returned from `list()`: 9 | 10 | ```python 11 | seen_pods = {} 12 | async for pod in (podlist := client.list(Pod)): 13 | seen_pods[pod.metadata.name] = pod 14 | async for event, pod in client.watch(Pod, resource_version=podlist.resourceVersion): 15 | match event: 16 | case "ADDED" | "MODIFIED": 17 | seen_pods[pod.metadata.name] = pod 18 | case "DELETED": 19 | del seen_pods[pod.metadata.name] 20 | ``` 21 | 22 | Note that the field `resourceVersion` is only available after 23 | iteration started, and will raise `lightkube.NotReadyError` if it is 24 | accessed before iterating the result. 25 | -------------------------------------------------------------------------------- /docs/reference/async_client.md: -------------------------------------------------------------------------------- 1 | ::: lightkube.AsyncClient 2 | -------------------------------------------------------------------------------- /docs/reference/client.md: -------------------------------------------------------------------------------- 1 | ::: lightkube.Client 2 | 3 | ## `namespace` parameter 4 | 5 | All API calls for namespaced resources will need to refer to a specific namespace. 6 | By default the namespace provided in the kubeconfig file is used. This default 7 | can be overridden when instantiating the client class with a different value. 8 | You can also specify a specific namespace for a single call using the `namespace` parameter. 
9 | 10 | The methods `create` or `replace` will use the namespace defined in the object when it's present. 11 | Notice that if the object namespace and the method's `namespace` parameter are set, 12 | both must have the same value. 13 | 14 | Override rules summary: 15 | 16 | * `client.method(..., namespace=..)` 17 | * [`obj.metadata.namespace`] (Only when calling `create` or `replace`) 18 | * `Client(..., namespace=...)` 19 | * kubernetes config file 20 | 21 | ## List or watch objects in all namespaces 22 | 23 | The methods `list` and `watch` can also return objects for all namespaces using `namespace='*'`. 24 | 25 | -------------------------------------------------------------------------------- /docs/reference/configuration.md: -------------------------------------------------------------------------------- 1 | # Configuration 2 | 3 | ::: lightkube.config.kubeconfig.KubeConfig 4 | 5 | ::: lightkube.config.kubeconfig.SingleConfig 6 | 7 | ::: lightkube.config.kubeconfig.Cluster 8 | -------------------------------------------------------------------------------- /docs/reference/exceptions.md: -------------------------------------------------------------------------------- 1 | # Exceptions 2 | 3 | Lightkube uses httpx for handling http requests and responses. 4 | Because of that, connectivity or timeout issues may raise exceptions. 5 | You can get familiar with the exceptions returned by httpx library [here](https://www.python-httpx.org/exceptions/). 
6 | 7 | There are a few lightkube-specific exceptions: 8 | 9 | ::: lightkube.ConfigError 10 | 11 | This exception is raised if a failure is encountered handling the kubernetes configuration: 12 | 13 | ```python 14 | from lightkube import Client, ConfigError 15 | 16 | try: 17 | client = Client() 18 | except ConfigError as e: 19 | print(e) 20 | ``` 21 | 22 | output: 23 | 24 | ```bash 25 | Configuration file ~/.kube/config not found 26 | ``` 27 | 28 | 29 | ::: lightkube.ApiError 30 | 31 | This exception extends [`httpx.HTTPStatusError`](https://www.python-httpx.org/exceptions/) and is raised when an HTTP error is 32 | returned from the kubernetes API. An extra `status` attribute is available with details 33 | about the failure using the standard model [`meta_v1.Status`](https://gtsystem.github.io/lightkube-models/1.19/models/meta_v1/#status). 34 | 35 | ```python 36 | from lightkube import Client, ApiError 37 | 38 | client = Client() 39 | try: 40 | pod = client.get(Pod, name="not-existing-pod") 41 | except ApiError as e: 42 | print(e.status) 43 | ``` 44 | 45 | output: 46 | 47 | ```python 48 | Status( 49 | apiVersion='v1', 50 | code=404, 51 | details=StatusDetails( 52 | causes=None, group=None, kind='pods', 53 | name='not-existing-pod', retryAfterSeconds=None, uid=None 54 | ), 55 | kind='Status', 56 | message='pods "not-existing-pod" not found', 57 | metadata=ListMeta( 58 | continue_=None, remainingItemCount=None, resourceVersion=None, selfLink=None 59 | ), 60 | reason='NotFound', 61 | status='Failure' 62 | ) 63 | ``` 64 | 65 | 66 | ::: lightkube.LoadResourceError 67 | 68 | This exception can be raised when loading an undefined resource using `codecs.from_dict()` 69 | or `codecs.load_all_yaml()` (See [Load/Dump kubernetes objects](../codecs.md)). 70 | 71 | 72 | ::: lightkube.NotReadyError 73 | 74 | This exception is raised when attempting to access the list response attribute `resourceVersion` 75 | before the list has been consumed.
For more details see [List-Watch pattern](../list-watch.md) 76 | 77 | 78 | ::: lightkube.exceptions.ObjectDeleted 79 | 80 | This exception is raised when waiting for an object condition using `client.wait(...)`, 81 | but the object itself get deleted. This is to prevent an infinite wait. 82 | 83 | 84 | ::: lightkube.exceptions.ConditionError 85 | 86 | This exception is raised when waiting for an object condition using `client.wait(...)`, 87 | if the condition matches one of the conditions in `raise_for_conditions` parameter. 88 | 89 | 90 | -------------------------------------------------------------------------------- /docs/reference/types.md: -------------------------------------------------------------------------------- 1 | ::: lightkube.types 2 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | # build documentation 2 | mkdocstrings 3 | mkdocstrings-python 4 | markdown 5 | mkautodoc 6 | markdown-include 7 | mkdocs-material 8 | -------------------------------------------------------------------------------- /docs/resources-and-models.md: -------------------------------------------------------------------------------- 1 | # Resources & Models 2 | 3 | ## Reference 4 | lightkube-models 5 | [1.33](https://gtsystem.github.io/lightkube-models/1.33), 6 | [1.32](https://gtsystem.github.io/lightkube-models/1.32), 7 | [1.31](https://gtsystem.github.io/lightkube-models/1.31), 8 | [1.30](https://gtsystem.github.io/lightkube-models/1.30), 9 | [1.29](https://gtsystem.github.io/lightkube-models/1.29), 10 | [1.28](https://gtsystem.github.io/lightkube-models/1.28), 11 | [1.27](https://gtsystem.github.io/lightkube-models/1.27), 12 | [1.26](https://gtsystem.github.io/lightkube-models/1.26), 13 | [1.25](https://gtsystem.github.io/lightkube-models/1.25), 14 | [1.24](https://gtsystem.github.io/lightkube-models/1.24), 15 | 
[1.23](https://gtsystem.github.io/lightkube-models/1.23), 16 | [1.22](https://gtsystem.github.io/lightkube-models/1.22), 17 | [1.21](https://gtsystem.github.io/lightkube-models/1.21), 18 | [1.20](https://gtsystem.github.io/lightkube-models/1.20), 19 | [1.19](https://gtsystem.github.io/lightkube-models/1.19), 20 | [1.18](https://gtsystem.github.io/lightkube-models/1.18). 21 | 22 | ## Resources 23 | 24 | Kubernetes API provides access to several resource kinds organized by version and 25 | API group. Lightkube represents such resources using classes that can be found inside 26 | the package `lightkube.resources`. 27 | 28 | Resources are organized in modules where the name follows the convention `{group}_{version}`. 29 | For example the group `apps` on version `v1` includes the resource kind `Deployment` 30 | and it can be accessed as follows: `from lightkube.resources.apps_v1 import Deployment`. 31 | 32 | Resource classes can be used to call the lightkube client methods or to instantiate the corresponding 33 | objects. 34 | 35 | ```python 36 | >>> from lightkube import Client 37 | >>> from lightkube.resources.apps_v1 import Deployment 38 | 39 | >>> client = Client() 40 | >>> dep = client.get(Deployment, name="my-deo") 41 | >>> type(dep) 42 | <class 'lightkube.resources.apps_v1.Deployment'> 43 | ``` 44 | 45 | ### Subresources 46 | 47 | Some kubernetes resources provide extra subresources like `/status`. 48 | Subresources can be found as attributes of the corresponding resource class. 49 | For example `Deployment` provides `Deployment.Status` and `Deployment.Scale`. 50 | Similar to resources, subresources can be used directly with the lightkube client. 51 | 52 | ## Models 53 | 54 | The package `lightkube.models` provides models for all schemas defined in the kubernetes API. 55 | The models are split in modules in a similar way to resources (i.e. the module name matches `{group}_{version}`). 56 | All models are defined using standard python dataclasses and are used 57 | to create or retrieve kubernetes objects.
58 | 59 | ```python 60 | >>> from lightkube.models.meta_v1 import ObjectMeta 61 | >>> ObjectMeta(name='test', namespace='default') 62 | ObjectMeta(annotations=None, clusterName=None, creationTimestamp=None, deletionGracePeriodSeconds=None, deletionTimestamp=None, finalizers=None, generateName=None, generation=None, initializers=None, labels=None, managedFields=None, name='test', namespace='default', ownerReferences=None, resourceVersion=None, selfLink=None, uid=None) 63 | ``` 64 | 65 | Resources are also subclasses of models but they hold extra information 66 | regarding the way the resource can be accessed. 67 | The lightkube client needs such information, so it will only accept 68 | resources or resource instances as parameters. 69 | 70 | ## Versioning 71 | 72 | Resources and Models are part of a separate python package named 73 | `lightkube-models`. This package follows the version of the corresponding 74 | kubernetes API: 75 | 76 | {k8s-version}.{release} 77 | 78 | For example the package version `1.15.6.1` matches kubernetes version `1.15.6` 79 | at release 1. 80 | 81 | Depending on the Kubernetes server in use, the appropriate version 82 | should be selected as follows: `lightkube-models>=1.15,<1.16`. 83 | 84 | A list of available versions can be seen on [pypi](https://pypi.org/project/lightkube-models/#history). 85 | -------------------------------------------------------------------------------- /docs/selectors.md: -------------------------------------------------------------------------------- 1 | # Selectors 2 | 3 | The methods `Client.list` and `Client.watch` allow filtering results on the server side 4 | using the attributes `labels` and `fields`. 5 | 6 | 7 | ## Label Selectors 8 | 9 | The attribute `labels` represents a set of requirements computed against the object labels 10 | that need to be satisfied in order for an object to be matched.
11 | The parameter value is a dictionary where the key represents a label key 12 | and the value represents a matching operation. 13 | 14 | This is equivalent to using the Kubernetes API parameter `labelSelector`. 15 | For more details regarding label selectors see the official [Kubernetes documentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels). 16 | 17 | This is the list of possible matching operations: 18 | 19 | | Operator | Signature | Example | Alternative syntax | 20 | |---|---|---|---| 21 | | Equal | `equal(value)` | `{"env": equal("prod")}` | `{"env": "prod"}` | 22 | | Not equal | `not_equal(value)` | `{"env": not_equal("prod")}` | - | 23 | | Exists | `exists()` | `{"env": exists()}` | `{"env": None}` | 24 | | Not exists | `not_exists()` | `{"env": not_exists()}` | - | 25 | | In | `in_(sequence)` | `{"env": in_(["prod", "dev"])}` | `{"env": ["prod", "dev"]}` | 26 | | Not in | `not_in(sequence)` | `{"env": not_in(["prod", "dev"])}` | - | 27 | 28 | ### Examples 29 | 30 | Match objects having a label with key `env` and value `prod`: 31 | ```python 32 | labels={"env": "prod"} 33 | ``` 34 | 35 | Match objects having `env == prod` and `app == myapp`: 36 | ```python 37 | labels={"env": "prod", "app": "myapp"} 38 | ``` 39 | 40 | Match objects having `env == prod` and a label with key `app`: 41 | ```python 42 | labels={"env": "prod", "app": None} 43 | ``` 44 | 45 | Match objects having `env == prod` or `env == dev`: 46 | ```python 47 | labels={"env": ("prod", "dev")} 48 | ``` 49 | 50 | The following example uses the operator functions: 51 | 52 | ```python 53 | from lightkube import operators as op 54 | ``` 55 | 56 | Match objects not having a label key `app`: 57 | ```python 58 | labels={"app": op.not_exists()} 59 | ``` 60 | 61 | Match objects where `env != prod`: 62 | ```python 63 | labels={"env": op.not_equal("prod")} 64 | ``` 65 | 66 | Match objects where `env != prod and env != dev`: 67 | ```python 68 | labels={"env":
op.not_in(["prod", "dev"])} 69 | ``` 70 | 71 | ## Field Selectors 72 | 73 | The attribute `fields` lets you select Kubernetes resources based on the value of 74 | one or more resource fields. This is equivalent to using the Kubernetes API parameter `fieldSelector`. 75 | For more details regarding field selectors see the official [Kubernetes documentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors). 76 | 77 | !!! note 78 | Each resource supports a specific (and very limited) set of fields that can be used in the selector. 79 | 80 | !!! note 81 | The valid operations for field selectors are only "equal", "not equal" and "not in". 82 | 83 | ### Examples 84 | 85 | Match objects where the name is `myobj`: 86 | ```python 87 | fields={"metadata.name": "myobj"} 88 | ``` 89 | 90 | Match objects where status phase is "Pending": 91 | ```python 92 | fields={"status.phase": "Pending"} 93 | ``` 94 | 95 | Match objects if they are not in the "default" namespace: 96 | ```python 97 | fields={"metadata.namespace": op.not_equal("default")} 98 | ``` 99 | -------------------------------------------------------------------------------- /docs/utils.md: -------------------------------------------------------------------------------- 1 | # Utils 2 | 3 | ## Quantity 4 | 5 | ### Convert quantity string to decimal 6 | 7 | K8s converts user input 8 | [quantities](https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/) 9 | to "canonical form": 10 | 11 | > Before serializing, Quantity will be put in "canonical form". This means that 12 | > Exponent/suffix will be adjusted up or down (with a corresponding increase or 13 | > decrease in Mantissa) such that: a. No precision is lost b. No fractional 14 | > digits will be emitted c. The exponent (or suffix) is as large as possible. 15 | > The sign will be omitted unless the number is negative.
16 | > 17 | > Examples: 1.5 will be serialized as "1500m" 1.5Gi will be serialized as "1536Mi" 18 | 19 | Additional examples: 20 | 21 | | User input | K8s representation | 22 | |----------------------------------|-------------------------------| 23 | | `{"memory": "0.9Gi"}` | `{"memory": "966367641600m"}` | 24 | | `{"cpu": "0.30000000000000004"}` | `{"cpu": "301m"}` | 25 | 26 | You may need to compare different quantities when interacting with K8s. 27 | 28 | ### Interface 29 | 30 | ::: lightkube.utils.quantity.parse_quantity 31 | options: 32 | heading_level: 4 33 | 34 | ::: lightkube.utils.quantity.equals_canonically 35 | options: 36 | heading_level: 4 37 | 38 | ### Examples 39 | 40 | #### Compare container memory request with limit 41 | 42 | ```python 43 | from lightkube.utils.quantity import parse_quantity 44 | 45 | pod = client.get(Pod, name='my-pod') 46 | container_res = pod.spec.containers[0].resources 47 | if parse_quantity(container_res.requests['memory']) < parse_quantity(container_res.limits['memory']): 48 | ... # request is less than limit, do something ... 49 | ``` 50 | 51 | #### Compare container request with limit 52 | 53 | ```python 54 | from lightkube.utils.quantity import equals_canonically 55 | 56 | pod = client.get(Pod, name='my-pod') 57 | container_res = pod.spec.containers[0].resources 58 | if equals_canonically(container_res.requests, container_res.limits): 59 | ... # requests and limits are the same ... 
60 | ``` 61 | 62 | #### Complete example 63 | 64 | After patching a statefulset's resource limits you may want to compare 65 | user's input to the statefulset's template to the active podspec: 66 | 67 | ```python 68 | >>> from lightkube import Client 69 | >>> from lightkube.models.apps_v1 import StatefulSetSpec 70 | >>> from lightkube.models.core_v1 import (Container, PodSpec, PodTemplateSpec, ResourceRequirements) 71 | >>> from lightkube.resources.apps_v1 import StatefulSet 72 | >>> from lightkube.resources.core_v1 import Pod 73 | >>> from lightkube.types import PatchType 74 | >>> 75 | >>> resource_reqs = ResourceRequirements( 76 | ... limits={"cpu": "0.8", "memory": "0.9Gi"}, 77 | ... requests={"cpu": "0.4", "memory": "0.5Gi"}, 78 | ... ) 79 | >>> 80 | >>> client = Client() 81 | >>> statefulset = client.get(StatefulSet, name="prom") 82 | >>> 83 | >>> delta = StatefulSet( 84 | ... spec=StatefulSetSpec( 85 | ... selector=statefulset.spec.selector, 86 | ... serviceName=statefulset.spec.serviceName, 87 | ... template=PodTemplateSpec( 88 | ... spec=PodSpec( 89 | ... containers=[Container(name="prometheus", resources=resource_reqs)] 90 | ... ) 91 | ... ) 92 | ... ) 93 | ... 
) 94 | >>> 95 | >>> client.patch(StatefulSet, "prom", delta, patch_type=PatchType.APPLY, field_manager="just me") 96 | >>> client.get(StatefulSet, name="prom").spec.template.spec.containers[1].resources 97 | ResourceRequirements(limits={'cpu': '800m', 'memory': '966367641600m'}, requests={'cpu': '400m', 'memory': '512Mi'}) 98 | >>> 99 | >>> pod = client.get(Pod, name="prom-0") 100 | >>> pod.spec.containers[1].resources 101 | ResourceRequirements(limits={'cpu': '800m', 'memory': '966367641600m'}, requests={'cpu': '400m', 'memory': '512Mi'}) 102 | >>> 103 | >>> from lightkube.utils.quantity import parse_quantity 104 | >>> parse_quantity(pod.spec.containers[1].resources.requests["memory"]) 105 | Decimal('536870912.000') 106 | >>> parse_quantity(pod.spec.containers[1].resources.requests["memory"]) == parse_quantity(resource_reqs.requests["memory"]) 107 | True 108 | >>> 109 | >>> from lightkube.utils.quantity import equals_canonically 110 | >>> equals_canonically(pod.spec.containers[1].resources.limits, resource_reqs.limits) 111 | True 112 | >>> equals_canonically(pod.spec.containers[1].resources, resource_reqs) 113 | True 114 | ``` 115 | -------------------------------------------------------------------------------- /e2e-tests/test-crd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | # name must match the spec fields below, and be in the form: . 5 | name: crontabs.stable.example.com 6 | spec: 7 | # group name to use for REST API: /apis// 8 | group: stable.example.com 9 | # list of versions supported by this CustomResourceDefinition 10 | versions: 11 | - name: v1 12 | # Each version can be enabled/disabled by Served flag. 13 | served: true 14 | # One and only one version must be marked as the storage version. 
15 | storage: true 16 | schema: 17 | openAPIV3Schema: 18 | type: object 19 | properties: 20 | spec: 21 | type: object 22 | properties: 23 | cronSpec: 24 | type: string 25 | image: 26 | type: string 27 | replicas: 28 | type: integer 29 | # either Namespaced or Cluster 30 | scope: Namespaced 31 | names: 32 | # plural name to be used in the URL: /apis/// 33 | plural: crontabs 34 | # singular name to be used as an alias on the CLI and for display 35 | singular: crontab 36 | # kind is normally the CamelCased singular type. Your resource manifests use this. 37 | kind: CronTab 38 | # shortNames allow shorter string to match your resource on the CLI 39 | shortNames: 40 | - ct 41 | -------------------------------------------------------------------------------- /e2e-tests/test_client.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | from pathlib import Path 3 | from string import ascii_lowercase 4 | from random import choices 5 | 6 | import pytest 7 | 8 | from lightkube import Client, ApiError, AsyncClient 9 | from lightkube.types import PatchType 10 | from lightkube.resources.core_v1 import Pod, Node, ConfigMap, Service, Namespace 11 | from lightkube.resources.apps_v1 import Deployment 12 | from lightkube.models.meta_v1 import ObjectMeta 13 | from lightkube.models.core_v1 import PodSpec, Container, ServiceSpec, ServicePort 14 | from lightkube.codecs import load_all_yaml 15 | from lightkube.generic_resource import create_namespaced_resource, get_generic_resource, \ 16 | load_in_cluster_generic_resources, async_load_in_cluster_generic_resources 17 | 18 | uid_count = 0 19 | 20 | 21 | @pytest.fixture 22 | def obj_name(): 23 | global uid_count 24 | uid_count += 1 25 | return f'test-{datetime.now().strftime("%Y%m%d%H%M%S")}-{uid_count}' 26 | 27 | 28 | def names(obj_list): 29 | return [obj.metadata.name for obj in obj_list] 30 | 31 | 32 | def create_pod(name, command) -> Pod: 33 | return Pod( 34 | 
metadata=ObjectMeta(name=name, labels={'app-name': name}), 35 | spec=PodSpec(containers=[Container( 36 | name='main', 37 | image='busybox', 38 | args=[ 39 | "/bin/sh", 40 | "-c", 41 | command 42 | ], 43 | )], terminationGracePeriodSeconds=1) 44 | ) 45 | 46 | 47 | def wait_pod(client, pod): 48 | # watch pods 49 | for etype, pod in client.watch(Pod, labels={'app-name': pod.metadata.name}, 50 | resource_version=pod.metadata.resourceVersion): 51 | if pod.status.phase != 'Pending': 52 | break 53 | 54 | 55 | def test_pod_apis(obj_name): 56 | client = Client() 57 | 58 | # list kube-system namespace 59 | pods = [pod.metadata.name for pod in client.list(Pod, namespace='kube-system')] 60 | assert len(pods) > 0 61 | assert any(name.startswith('metrics-server') for name in pods) 62 | 63 | # create a pod 64 | pod = client.create(create_pod(obj_name, "while true;do echo 'this is a test';sleep 5; done")) 65 | try: 66 | assert pod.metadata.name == obj_name 67 | assert pod.metadata.namespace == client.namespace 68 | assert pod.status.phase 69 | 70 | 71 | wait_pod(client, pod) 72 | 73 | # read pod logs 74 | for l in client.log(obj_name, follow=True): 75 | assert l == 'this is a test\n' 76 | break 77 | finally: 78 | # delete the pod 79 | client.delete(Pod, obj_name) 80 | 81 | 82 | def test_pod_not_exist(): 83 | client = Client() 84 | with pytest.raises(ApiError) as exc_info: 85 | client.get(Pod, name='this-pod-is-not-found') 86 | 87 | status = exc_info.value.status 88 | assert status.code == 404 89 | assert status.details.name == 'this-pod-is-not-found' 90 | assert status.reason == 'NotFound' 91 | assert status.status == 'Failure' 92 | 93 | 94 | def test_pod_already_exist(obj_name): 95 | client = Client() 96 | client.create(create_pod(obj_name, "sleep 5")) 97 | try: 98 | with pytest.raises(ApiError) as exc_info: 99 | client.create(create_pod(obj_name, "sleep 5")) 100 | status = exc_info.value.status 101 | assert status.code == 409 102 | assert status.reason == 'AlreadyExists' 103 | 
assert status.status == 'Failure' 104 | finally: 105 | # delete the pod 106 | client.delete(Pod, obj_name) 107 | 108 | 109 | def test_global_methods(): 110 | client = Client() 111 | nodes = [node.metadata.name for node in client.list(Node)] 112 | assert len(nodes) > 0 113 | node = client.get(Node, name=nodes[0]) 114 | assert node.metadata.name == nodes[0] 115 | assert node.metadata.labels['kubernetes.io/os'] == node.status.nodeInfo.operatingSystem 116 | 117 | 118 | def test_namespaced_methods(obj_name): 119 | client = Client() 120 | config = ConfigMap( 121 | metadata=ObjectMeta(name=obj_name, namespace='default'), 122 | data={'key1': 'value1', 'key2': 'value2'} 123 | ) 124 | 125 | # create 126 | config = client.create(config) 127 | try: 128 | assert config.metadata.name == obj_name 129 | assert config.data['key1'] == 'value1' 130 | assert config.data['key2'] == 'value2' 131 | 132 | # replace 133 | config.data['key1'] = 'new value' 134 | config = client.replace(config) 135 | assert config.data['key1'] == 'new value' 136 | assert config.data['key2'] == 'value2' 137 | 138 | # patch with PatchType.STRATEGIC 139 | patch = {'metadata': {'labels': {'app': 'xyz'}}} 140 | config = client.patch(ConfigMap, name=obj_name, obj=patch) 141 | assert config.metadata.labels['app'] == 'xyz' 142 | 143 | # get 144 | config2 = client.get(ConfigMap, name=obj_name) 145 | assert config.metadata.creationTimestamp == config2.metadata.creationTimestamp 146 | 147 | # list 148 | configs = [config.metadata.name for config in client.list(ConfigMap)] 149 | assert obj_name in configs 150 | 151 | finally: 152 | client.delete(ConfigMap, name=obj_name) 153 | 154 | 155 | def test_patching(obj_name): 156 | client = Client() 157 | service = Service( 158 | metadata=ObjectMeta(name=obj_name), 159 | spec=ServiceSpec( 160 | ports=[ServicePort(name='a', port=80, targetPort=8080)], 161 | selector={'app': 'not-existing'} 162 | ) 163 | ) 164 | 165 | # create 166 | client.create(service) 167 | try: 168 | # patch 
with PatchType.STRATEGIC 169 | patch = {'spec': {'ports': [{'name': 'b', 'port':81, 'targetPort': 8081}]}} 170 | service = client.patch(Service, name=obj_name, obj=patch) 171 | assert len(service.spec.ports) == 2 172 | assert {port.name for port in service.spec.ports} == {'a', 'b'} 173 | 174 | # strategic - patch merge key: port 175 | # we also try to send a Resource type for patching 176 | patch = Service(spec=ServiceSpec(ports=[ServicePort(name='b', port=81, targetPort=8082)])) 177 | service = client.patch(Service, name=obj_name, obj=patch) 178 | assert len(service.spec.ports) == 2 179 | 180 | for port in service.spec.ports: 181 | if port.port == 81: 182 | assert port.targetPort == 8082 183 | 184 | # patch with PatchType.MERGE 185 | # merge will replace the full list 186 | patch = {'spec': {'ports': [{'name': 'b', 'port': 81, 'targetPort': 8081}]}} 187 | service = client.patch(Service, name=obj_name, obj=patch, patch_type=PatchType.MERGE) 188 | assert len(service.spec.ports) == 1 189 | assert service.spec.ports[0].port == 81 190 | 191 | # patch with PatchType.JSON 192 | patch = [ 193 | {'op': 'add', 'path': '/spec/ports/-', 'value': {'name': 'a', 'port': 80, 'targetPort': 8080}} 194 | ] 195 | service = client.patch(Service, name=obj_name, obj=patch, patch_type=PatchType.JSON) 196 | assert len(service.spec.ports) == 2 197 | assert service.spec.ports[1].port == 80 198 | 199 | finally: 200 | client.delete(Service, name=obj_name) 201 | 202 | 203 | def test_apply(obj_name): 204 | client = Client(field_manager='lightkube') 205 | config = ConfigMap( 206 | apiVersion='v1', # apiVersion and kind are required for server-side apply 207 | kind='ConfigMap', 208 | metadata=ObjectMeta(name=obj_name, namespace='default'), 209 | data={'key1': 'value1', 'key2': 'value2'} 210 | ) 211 | 212 | # create with apply 213 | c = client.apply(config) 214 | try: 215 | assert c.metadata.name == obj_name 216 | assert c.data['key1'] == 'value1' 217 | assert c.data['key2'] == 'value2' 218 | 219 
| # modify 220 | config.data['key2'] = 'new value' 221 | del config.data['key1'] 222 | config.data['key3'] = 'value3' 223 | c = client.apply(config) 224 | assert c.data['key2'] == 'new value' 225 | assert c.data['key3'] == 'value3' 226 | assert 'key1' not in c.data 227 | 228 | # remove all keys 229 | config.data.clear() 230 | c = client.apply(config) 231 | assert not c.data 232 | 233 | # use the patch equivalent 234 | config.data['key1'] = 'new value' 235 | c = client.patch(ConfigMap, obj_name, config.to_dict(), patch_type=PatchType.APPLY) 236 | assert c.data['key1'] == 'new value' 237 | 238 | finally: 239 | client.delete(ConfigMap, name=obj_name) 240 | 241 | 242 | def test_deletecollection(obj_name): 243 | client = Client() 244 | 245 | config = ConfigMap( 246 | metadata=ObjectMeta(name=obj_name, namespace=obj_name), 247 | data={'key1': 'value1', 'key2': 'value2'} 248 | ) 249 | 250 | client.create(Namespace(metadata=ObjectMeta(name=obj_name))) 251 | 252 | try: 253 | # create 254 | client.create(config) 255 | config.metadata.name = f"{obj_name}-2" 256 | client.create(config) 257 | 258 | # k3s automatically create/recreate one extra configmap. 
259 | maps = names(client.list(ConfigMap, namespace=obj_name)) 260 | assert obj_name in maps 261 | assert f"{obj_name}-2" in maps 262 | 263 | client.deletecollection(ConfigMap, namespace=obj_name) 264 | maps = names(client.list(ConfigMap, namespace=obj_name)) 265 | assert obj_name not in maps 266 | assert f"{obj_name}-2" not in maps 267 | 268 | finally: 269 | client.delete(Namespace, name=obj_name) 270 | 271 | 272 | def test_list_all_ns(obj_name): 273 | client = Client() 274 | ns1 = obj_name 275 | ns2 = f"{obj_name}-2" 276 | 277 | config = ConfigMap( 278 | metadata=ObjectMeta(name=obj_name), 279 | data={'key1': 'value1', 'key2': 'value2'} 280 | ) 281 | 282 | client.create(Namespace(metadata=ObjectMeta(name=ns1))) 283 | client.create(Namespace(metadata=ObjectMeta(name=ns2))) 284 | 285 | try: 286 | client.create(config, namespace=ns1) 287 | client.create(config, namespace=ns2) 288 | 289 | maps = [f"{cm.metadata.namespace}/{cm.metadata.name}" for cm in client.list(ConfigMap, namespace='*')] 290 | assert f"{ns1}/{obj_name}" in maps 291 | assert f"{ns2}/{obj_name}" in maps 292 | 293 | finally: 294 | client.delete(Namespace, name=ns1) 295 | client.delete(Namespace, name=ns2) 296 | 297 | 298 | def test_crd(): 299 | client = Client() 300 | fname = Path(__file__).parent.joinpath('test-crd.yaml') 301 | with fname.open() as f: 302 | crd = list(load_all_yaml(f))[0] 303 | 304 | CronTab = create_namespaced_resource( 305 | group="stable.example.com", 306 | version="v1", 307 | kind="CronTab", 308 | plural="crontabs", 309 | verbs=None 310 | ) 311 | 312 | client.create(crd) 313 | # CRD endpoints are not ready immediately, we need to wait for condition `Established` 314 | client.wait(crd.__class__, name=crd.metadata.name, for_conditions=['Established']) 315 | try: 316 | client.create(CronTab( 317 | metadata={'name': 'my-cron'}, 318 | spec={'cronSpec': '* * * * */5', 'image': 'my-awesome-cron-image'}, 319 | )) 320 | 321 | ct = client.get(CronTab, name='my-cron') 322 | assert 
ct.metadata.name == 'my-cron' 323 | assert ct.spec['cronSpec'] == '* * * * */5' 324 | finally: 325 | client.delete(crd.__class__, name=crd.metadata.name) 326 | 327 | 328 | @pytest.mark.parametrize("resource", [Node]) 329 | def test_wait_global(resource): 330 | client = Client() 331 | 332 | for obj in client.list(resource): 333 | client.wait(resource, obj.metadata.name, for_conditions=["Ready"]) 334 | 335 | 336 | @pytest.mark.asyncio 337 | @pytest.mark.parametrize("resource", [Node]) 338 | async def test_wait_global_async(resource): 339 | client = AsyncClient() 340 | 341 | async for obj in client.list(resource): 342 | await client.wait(resource, obj.metadata.name, for_conditions=["Ready"]) 343 | 344 | await client.close() 345 | 346 | 347 | WAIT_NAMESPACED_PARAMS = [ 348 | (Pod, "Ready", {"containers": [{"name": "nginx", "image": "nginx:1.21.4"}]}), 349 | ( 350 | Deployment, 351 | "Available", 352 | { 353 | "selector": {"matchLabels": {"foo": "bar"}}, 354 | "template": { 355 | "metadata": {"labels": {"foo": "bar"}}, 356 | "spec": {"containers": [{"name": "nginx", "image": "nginx:1.21.4"}]}, 357 | }, 358 | }, 359 | ), 360 | ] 361 | 362 | 363 | @pytest.mark.parametrize("resource,for_condition,spec", WAIT_NAMESPACED_PARAMS) 364 | def test_wait_namespaced(resource, for_condition, spec): 365 | client = Client() 366 | 367 | requested = resource.from_dict( 368 | {"metadata": {"generateName": "e2e-test-"}, "spec": spec} 369 | ) 370 | created = client.create(requested) 371 | client.wait( 372 | resource, 373 | created.metadata.name, 374 | for_conditions=[for_condition], 375 | ) 376 | client.delete(resource, created.metadata.name) 377 | 378 | 379 | @pytest.mark.asyncio 380 | @pytest.mark.parametrize("resource,for_condition,spec", WAIT_NAMESPACED_PARAMS) 381 | async def test_wait_namespaced_async(resource, for_condition, spec): 382 | client = AsyncClient() 383 | 384 | requested = resource.from_dict( 385 | {"metadata": {"generateName": "e2e-test-"}, "spec": spec} 386 | ) 387 | 
created = await client.create(requested) 388 | await client.wait( 389 | resource, 390 | created.metadata.name, 391 | for_conditions=[for_condition], 392 | ) 393 | await client.delete(resource, created.metadata.name) 394 | 395 | await client.close() 396 | 397 | 398 | @pytest.fixture(scope="function") 399 | def sample_crd(): 400 | client = Client() 401 | fname = Path(__file__).parent.joinpath('test-crd.yaml') 402 | with fname.open() as f: 403 | crd = list(load_all_yaml(f))[0] 404 | 405 | # modify the crd to be unique, avoiding collision with other tests 406 | prefix = "".join(choices(ascii_lowercase, k=5)) 407 | crd.spec.group = f"{prefix}{crd.spec.group}" 408 | crd.spec.names.plural = f"{prefix}{crd.spec.names.plural}" 409 | crd.spec.names.singular = f"{prefix}{crd.spec.names.singular}" 410 | crd.spec.names.kind = f"{prefix}{crd.spec.names.kind}" 411 | crd.spec.names.shortnames = [f"{prefix}{shortname}" for shortname in crd.spec.names.shortNames] 412 | crd.metadata.name = f"{crd.spec.names.plural}.{crd.spec.group}" 413 | 414 | client.create(crd) 415 | # CRD endpoints are not ready immediately, we need to wait for condition `Established` 416 | client.wait(crd.__class__, name=crd.metadata.name, for_conditions=['Established']) 417 | 418 | yield crd 419 | 420 | client.delete(crd.__class__, name=crd.metadata.name) 421 | 422 | 423 | def test_load_in_cluster_generic_resources(sample_crd): 424 | client = Client() 425 | 426 | # Assert that we do not yet have a generic resource for this CR 427 | cr_version = f"{sample_crd.spec.group}/{sample_crd.spec.versions[0].name}" 428 | cr_kind = sample_crd.spec.names.kind 429 | gr = get_generic_resource(cr_version, cr_kind) 430 | assert gr is None 431 | 432 | load_in_cluster_generic_resources(client) 433 | 434 | # Assert that we now have a generic resource for this CR 435 | gr = get_generic_resource(cr_version, cr_kind) 436 | assert gr is not None 437 | 438 | 439 | @pytest.mark.asyncio 440 | async def 
test_load_in_cluster_generic_resources_async(sample_crd): 441 | client = AsyncClient() 442 | 443 | # Assert that we do not yet have a generic resource for this CR 444 | cr_version = f"{sample_crd.spec.group}/{sample_crd.spec.versions[0].name}" 445 | cr_kind = sample_crd.spec.names.kind 446 | gr = get_generic_resource(cr_version, cr_kind) 447 | assert gr is None 448 | 449 | await async_load_in_cluster_generic_resources(client) 450 | 451 | # Assert that we now have a generic resource for this CR 452 | gr = get_generic_resource(cr_version, cr_kind) 453 | assert gr is not None 454 | -------------------------------------------------------------------------------- /lightkube/__init__.py: -------------------------------------------------------------------------------- 1 | from .core.client import Client 2 | from .core.async_client import AsyncClient 3 | from .core.generic_client import ALL_NS 4 | from .core.exceptions import ApiError, NotReadyError, ConfigError, LoadResourceError 5 | from .core.sort_objects import sort_objects 6 | from .config.kubeconfig import KubeConfig, SingleConfig 7 | 8 | __all__ = [ 9 | "Client", 10 | "AsyncClient", 11 | "ALL_NS", 12 | "ApiError", 13 | "NotReadyError", 14 | "ConfigError", 15 | "LoadResourceError", 16 | "sort_objects", 17 | "KubeConfig", 18 | "SingleConfig", 19 | ] 20 | -------------------------------------------------------------------------------- /lightkube/codecs.py: -------------------------------------------------------------------------------- 1 | from typing import Union, TextIO, Iterator, List, Mapping 2 | 3 | import yaml 4 | 5 | from .generic_resource import ( 6 | GenericGlobalResource, 7 | GenericNamespacedResource, 8 | create_resources_from_crd, 9 | ) 10 | from .core.exceptions import LoadResourceError 11 | from .core.resource_registry import resource_registry 12 | 13 | __all__ = ["from_dict", "load_all_yaml", "dump_all_yaml", "resource_registry"] 14 | 15 | try: 16 | import jinja2 17 | except ImportError: 18 | jinja2 = 
None 19 | 20 | REQUIRED_ATTR = ("apiVersion", "kind") 21 | 22 | AnyResource = Union[GenericGlobalResource, GenericNamespacedResource] 23 | 24 | 25 | def from_dict(d: dict) -> AnyResource: 26 | """Converts a kubernetes resource defined as python dict to the corresponding resource object. 27 | If the dict represent a standard resource, the function will automatically load the appropriate 28 | resource type. Generic resources are also supported and used assuming they were defined prior to 29 | the function call. Returns the resource object or raise a `LoadResourceError`. 30 | 31 | **parameters** 32 | 33 | * **d** - A dictionary representing a Kubernetes resource. Keys `apiVersion` and `kind` are 34 | always required. 35 | """ 36 | if not isinstance(d, Mapping): 37 | raise LoadResourceError("Invalid resource definition, not a dict.") 38 | for attr in REQUIRED_ATTR: 39 | if attr not in d: 40 | raise LoadResourceError( 41 | f"Invalid resource definition, key '{attr}' missing." 42 | ) 43 | 44 | model = resource_registry.load(d["apiVersion"], d["kind"]) 45 | return model.from_dict(d) 46 | 47 | 48 | def load_all_yaml( 49 | stream: Union[str, TextIO], 50 | context: dict = None, 51 | template_env=None, 52 | create_resources_for_crds: bool = False, 53 | ) -> List[AnyResource]: 54 | """Load kubernetes resource objects defined as YAML. See `from_dict` regarding how resource types are detected. 55 | Returns a list of resource objects or raise a `LoadResourceError`. Skips any empty YAML documents in the 56 | stream, returning an empty list if all YAML documents are empty. Deep parse any items from .*List resources. 57 | 58 | **parameters** 59 | 60 | * **stream** - A file-like object or a string representing a yaml file or a template resulting in 61 | a yaml file. 62 | * **context** - When is not `None` the stream is considered a `jinja2` template and the `context` 63 | will be used during templating. 
64 | * **template_env** - `jinja2` template environment to be used for templating. When absent a standard 65 | environment is used. 66 | * **create_resources_for_crds** - If True, a generic resource will be created for every version 67 | of every CRD found that does not already have a generic resource. There will be no side 68 | effect for any CRD that already has a generic resource. Else if False, no generic resources 69 | will be created. Default is False. 70 | 71 | **NOTE**: When using the template functionality (setting the context parameter), the dependency 72 | module `jinja2` need to be installed. 73 | """ 74 | if context is not None: 75 | stream = _template(stream, context=context, template_env=template_env) 76 | 77 | def _flatten(objects: Iterator) -> List[AnyResource]: 78 | """Flatten resources which have a kind = *List.""" 79 | resources = [] 80 | for obj in objects: 81 | if obj is None: 82 | continue 83 | if isinstance(obj, Mapping) and obj.get("kind", "").endswith("List"): 84 | resources += _flatten(obj.get("items") or []) 85 | else: 86 | res = from_dict(obj) 87 | resources.append(res) 88 | 89 | if ( 90 | create_resources_for_crds is True 91 | and res.kind == "CustomResourceDefinition" 92 | ): 93 | create_resources_from_crd(res) 94 | return resources 95 | 96 | return _flatten(yaml.safe_load_all(stream)) 97 | 98 | 99 | def dump_all_yaml(resources: List[AnyResource], stream: TextIO = None, indent=2): 100 | """Write kubernetes resource objects as YAML into an open file. 101 | 102 | **parameters** 103 | 104 | * **resources** - List of resources to write on the file 105 | * **stream** - Path to a file where to write the resources. When not set the content is returned 106 | as a string. 107 | * **indent** - Number of characters for indenting nasted blocks. 
108 | """ 109 | res = [r.to_dict() for r in resources] 110 | return yaml.safe_dump_all(res, stream, indent=indent) 111 | 112 | 113 | def _template( 114 | stream: Union[str, TextIO], context: dict = None, template_env=None 115 | ) -> List[AnyResource]: 116 | """ 117 | Template a stream using jinja2 and the given context 118 | """ 119 | if jinja2 is None: 120 | raise ImportError("load_from_template requires jinja2 to be installed") 121 | 122 | if template_env is None: 123 | template_env = jinja2.Environment(trim_blocks=True, lstrip_blocks=True) 124 | elif not isinstance(template_env, jinja2.Environment): 125 | raise LoadResourceError("template_env is not a valid jinja2 template") 126 | 127 | tmpl = template_env.from_string( 128 | stream if isinstance(stream, str) else stream.read() 129 | ) 130 | return tmpl.render(**context) 131 | -------------------------------------------------------------------------------- /lightkube/config/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gtsystem/lightkube/ddeb2928c3de93b48e13933e5ad07d0ed7b115fc/lightkube/config/__init__.py -------------------------------------------------------------------------------- /lightkube/config/client_adapter.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import ssl 4 | import subprocess 5 | from typing import Optional 6 | import asyncio.subprocess 7 | 8 | import httpx 9 | 10 | from .kubeconfig import SingleConfig 11 | from .models import Cluster, User, UserExec, FileStr 12 | from ..core.exceptions import ConfigError 13 | 14 | 15 | def Client( 16 | config: SingleConfig, 17 | timeout: httpx.Timeout, 18 | trust_env: bool = True, 19 | transport: httpx.BaseTransport = None, 20 | proxy: str = None, 21 | ) -> httpx.Client: 22 | return httpx.Client( 23 | transport=transport, **httpx_parameters(config, timeout, proxy, trust_env) 24 | ) 25 | 26 | 27 | def 
AsyncClient( 28 | config: SingleConfig, 29 | timeout: httpx.Timeout, 30 | trust_env: bool = True, 31 | transport: httpx.AsyncBaseTransport = None, 32 | proxy: str = None, 33 | ) -> httpx.AsyncClient: 34 | return httpx.AsyncClient( 35 | transport=transport, **httpx_parameters(config, timeout, proxy, trust_env) 36 | ) 37 | 38 | 39 | def httpx_parameters(config: SingleConfig, timeout: httpx.Timeout, proxy: str, trust_env: bool): 40 | return dict( 41 | timeout=timeout, 42 | proxy=proxy, 43 | base_url=config.cluster.server, 44 | verify=verify_cluster(config.cluster, config.user, config.abs_file, trust_env=trust_env), 45 | auth=user_auth(config.user), 46 | trust_env=trust_env, 47 | ) 48 | 49 | 50 | class BearerAuth(httpx.Auth): 51 | def __init__(self, token): 52 | self._bearer = f"Bearer {token}" 53 | 54 | def auth_flow(self, request: httpx.Request): 55 | request.headers["Authorization"] = self._bearer 56 | yield request 57 | 58 | 59 | async def async_check_output(command, env): 60 | PIPE = asyncio.subprocess.PIPE 61 | proc = await asyncio.create_subprocess_exec( 62 | *command, env=env, stdin=None, stdout=PIPE, stderr=PIPE 63 | ) 64 | stdout, stderr = await proc.communicate() 65 | if proc.returncode != 0: 66 | raise ConfigError( 67 | f"Exec {command[0]} returned {proc.returncode}: {stderr.decode()}" 68 | ) 69 | return stdout 70 | 71 | 72 | def sync_check_output(command, env): 73 | proc = subprocess.Popen( 74 | command, env=env, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE 75 | ) 76 | stdout, stderr = proc.communicate() 77 | if proc.returncode != 0: 78 | raise ConfigError( 79 | f"Exec {command[0]} returned {proc.returncode}: {stderr.decode()}" 80 | ) 81 | return stdout 82 | 83 | 84 | class ExecAuth(httpx.Auth): 85 | def __init__(self, exec: UserExec): 86 | self._exec = exec 87 | self._last_bearer = None 88 | 89 | def _prepare(self): 90 | exec = self._exec 91 | if exec.apiVersion not in ( 92 | "client.authentication.k8s.io/v1alpha1", 93 | 
"client.authentication.k8s.io/v1beta1", 94 | ): 95 | raise ConfigError( 96 | f"auth exec api version {exec.apiVersion} not implemented" 97 | ) 98 | cmd_env_vars = dict(os.environ) 99 | if exec.env: 100 | cmd_env_vars.update((var.name, var.value) for var in exec.env) 101 | # TODO: add support for passing KUBERNETES_EXEC_INFO env var 102 | # https://github.com/kubernetes/community/blob/master/contributors/design-proposals/auth/kubectl-exec-plugins.md 103 | args = exec.args if exec.args else [] 104 | return [exec.command] + args, cmd_env_vars 105 | 106 | def sync_auth_flow(self, request: httpx.Request): 107 | if self._last_bearer: 108 | request.headers["Authorization"] = self._last_bearer 109 | response = yield request 110 | if response.status_code != 401: 111 | return 112 | 113 | command, env = self._prepare() 114 | output = sync_check_output(command, env=env) 115 | token = json.loads(output)["status"]["token"] 116 | request.headers["Authorization"] = self._last_bearer = f"Bearer {token}" 117 | yield request 118 | 119 | async def async_auth_flow(self, request: httpx.Request): 120 | if self._last_bearer: 121 | request.headers["Authorization"] = self._last_bearer 122 | response = yield request 123 | if response.status_code != 401: 124 | return 125 | 126 | command, env = self._prepare() 127 | output = await async_check_output(command, env=env) 128 | token = json.loads(output)["status"]["token"] 129 | request.headers["Authorization"] = self._last_bearer = f"Bearer {token}" 130 | yield request 131 | 132 | 133 | def user_auth(user: Optional[User]): 134 | if user is None: 135 | return None 136 | 137 | if user.token is not None: 138 | return BearerAuth(user.token) 139 | 140 | if user.exec: 141 | return ExecAuth(user.exec) 142 | 143 | if user.username and user.password: 144 | return httpx.BasicAuth(user.username, user.password) 145 | 146 | if user.auth_provider: 147 | raise ConfigError("auth-provider not supported") 148 | 149 | 150 | def user_cert(user: User, abs_file): 151 | 
"""Extract user certificates""" 152 | if user.client_cert or user.client_cert_data: 153 | return ( 154 | FileStr(user.client_cert_data) or abs_file(user.client_cert), 155 | FileStr(user.client_key_data) or abs_file(user.client_key), 156 | ) 157 | return None 158 | 159 | 160 | def verify_cluster(cluster: Cluster, user: User, abs_file, trust_env: bool = True): 161 | """setup certificate verification""" 162 | if cluster.certificate_auth: 163 | ctx = ssl.create_default_context(cafile=abs_file(cluster.certificate_auth)) 164 | elif cluster.certificate_auth_data: 165 | ctx = ssl.create_default_context(cafile=FileStr(cluster.certificate_auth_data)) 166 | else: 167 | ctx = httpx.create_ssl_context(verify=not cluster.insecure, trust_env=trust_env) 168 | cert = user_cert(user, abs_file) 169 | if cert: 170 | ctx.load_cert_chain(*cert) 171 | return ctx 172 | -------------------------------------------------------------------------------- /lightkube/config/kubeconfig.py: -------------------------------------------------------------------------------- 1 | import os 2 | import yaml 3 | from typing import Dict, NamedTuple, Optional 4 | from pathlib import Path 5 | 6 | from ..core import exceptions 7 | from .models import Cluster, User, Context 8 | 9 | """ 10 | | behavior | kubectl | lightkube | 11 | |---------------------------|---------------------------|-----------------------| 12 | | current-context missing | use proxy | fail (conf is None) | 13 | | current-context wrong | fail | fail | 14 | | context.cluster missing | use proxy | fail | 15 | | context.user missing | interactive user/password | no auth set | 16 | | context.user wrong | interactive user/password | fail | 17 | | context.namespace missing | use default namespace | use default namespace | 18 | """ 19 | 20 | 21 | def to_mapping(obj_list, key, factory): 22 | return {obj["name"]: factory.from_dict(obj[key], lazy=False) for obj in obj_list} 23 | 24 | 25 | DEFAULT_NAMESPACE = "default" 26 | SERVICE_ACCOUNT = 
"/var/run/secrets/kubernetes.io/serviceaccount" 27 | DEFAULT_KUBECONFIG = "~/.kube/config" 28 | 29 | 30 | class SingleConfig(NamedTuple): 31 | """Represents a single configuration instance as the result of selecting a context""" 32 | #: name of the context 33 | context_name: str 34 | context: Context 35 | cluster: Cluster 36 | user: User = None 37 | fname: Path = None 38 | 39 | @property 40 | def namespace(self): 41 | """Returns the namespace in the current context""" 42 | return self.context.namespace or DEFAULT_NAMESPACE 43 | 44 | def abs_file(self, fname): 45 | """Return the absolute path of a relative file path, relatively to the configuration file""" 46 | if Path(fname).is_absolute(): 47 | return fname 48 | 49 | if self.fname is None: 50 | raise exceptions.ConfigError( 51 | f"{fname} is relative, but kubeconfig path unknown" 52 | ) 53 | 54 | return self.fname.parent.joinpath(fname) 55 | 56 | 57 | PROXY_CONF = SingleConfig( 58 | context_name="default", 59 | context=Context(cluster="default"), 60 | cluster=Cluster(server="http://localhost:8080"), 61 | ) 62 | 63 | 64 | class KubeConfig: 65 | """Class to represent a kubeconfig. See the specific constructors depending on your use case. 66 | 67 | Attributes: 68 | clusters: Dictionary of cluster name -> `Cluster` instance. 69 | contexts: Dictionary of context name -> `Context` instance. 70 | users: Dictionary of user name -> `User` instance. 71 | """ 72 | 73 | clusters: Dict[str, Cluster] 74 | users: Dict[str, User] 75 | contexts: Dict[str, Context] 76 | 77 | def __init__( 78 | self, *, clusters, contexts, users=None, current_context=None, fname=None 79 | ): 80 | """ 81 | Create the kubernetes configuration manually. Normally this constructor should not be called directly. 82 | Use a specific constructor instead. 83 | 84 | Attributes: 85 | clusters: Dictionary of cluster name -> `Cluster` instance. 86 | contexts: Dictionary of context name -> `Context` instance. 87 | users: Dictionary of user name -> `User` instance. 
88 | current_context: Name of the current context. 89 | fname: Name of the file where the configuration has been readed from. 90 | """ 91 | self.current_context = current_context 92 | self.clusters = clusters 93 | self.contexts = contexts 94 | self.users = users or {} 95 | self.fname = Path(fname) if fname else None 96 | 97 | @classmethod 98 | def from_dict(cls, conf: Dict, fname=None): 99 | """Creates a KubeConfig instance from the content of a dictionary structure. 100 | 101 | **Parameters** 102 | 103 | * **conf**: Configuration structure, main attributes are `clusters`, `contexts`, `users` and `current-context`. 104 | * **fname**: File path from where this configuration has been loaded. This is needed to resolve relative paths 105 | present inside the configuration. 106 | """ 107 | return cls( 108 | current_context=conf.get("current-context"), 109 | clusters=to_mapping(conf["clusters"], "cluster", factory=Cluster), 110 | contexts=to_mapping(conf["contexts"], "context", factory=Context), 111 | users=to_mapping(conf.get("users", []), "user", factory=User), 112 | fname=fname, 113 | ) 114 | 115 | def get( 116 | self, context_name=None, default: SingleConfig = None 117 | ) -> Optional[SingleConfig]: 118 | """Returns a `SingleConfig` instance, representing the configuration matching the given `context_name`. 119 | Lightkube client will automatically call this method without parameters when an instance of `KubeConfig` 120 | is provided. 121 | 122 | **Parameters** 123 | 124 | * **context_name**: Name of the context to use. If `context_name` is undefined, the `current-context` is used. 125 | In the case both contexts are undefined, and the default is provided, this method will return the default. 126 | It will fail with an error otherwise. 127 | * **default**: Instance of a `SingleConfig` to be returned in case both contexts are not set. When this 128 | parameter is not provided and no context is defined, the method call will fail. 
129 | """ 130 | if context_name is None: 131 | context_name = self.current_context 132 | if context_name is None: 133 | if default is None: 134 | raise exceptions.ConfigError( 135 | "No current context set and no default provided" 136 | ) 137 | return default 138 | try: 139 | ctx = self.contexts[context_name] 140 | except KeyError: 141 | raise exceptions.ConfigError(f"Context '{context_name}' not found") 142 | return SingleConfig( 143 | context_name=context_name, 144 | context=ctx, 145 | cluster=self.clusters[ctx.cluster], 146 | user=self.users[ctx.user] if ctx.user else None, 147 | fname=self.fname, 148 | ) 149 | 150 | @classmethod 151 | def from_file(cls, fname): 152 | """Creates an instance of the KubeConfig class from a kubeconfig file in YAML format. 153 | 154 | **Parameters** 155 | 156 | * **fname**: Path to the kuberneted configuration. 157 | """ 158 | filepath = Path(fname).expanduser() 159 | if not filepath.is_file(): 160 | raise exceptions.ConfigError(f"Configuration file {fname} not found") 161 | with filepath.open() as f: 162 | return cls.from_dict(yaml.safe_load(f.read()), fname=filepath) 163 | 164 | @classmethod 165 | def from_one( 166 | cls, *, cluster, user=None, context_name="default", namespace=None, fname=None 167 | ): 168 | """Creates an instance of the KubeConfig class from one cluster and one user configuration""" 169 | context = Context( 170 | cluster=context_name, 171 | user=context_name if user else None, 172 | namespace=namespace, 173 | ) 174 | return cls( 175 | clusters={context_name: cluster}, 176 | contexts={context_name: context}, 177 | users={context_name: user} if user else None, 178 | current_context=context_name, 179 | fname=fname, 180 | ) 181 | 182 | @classmethod 183 | def from_server(cls, url, namespace=None): 184 | """Creates an instance of the KubeConfig class from the cluster server url""" 185 | return cls.from_one(cluster=Cluster(server=url), namespace=namespace) 186 | 187 | @classmethod 188 | def from_service_account(cls, 
service_account=SERVICE_ACCOUNT): 189 | """Creates a configuration from in-cluster service account information. 190 | 191 | **Parameters** 192 | 193 | * **service_account**: Allows to override the default service account directory path. 194 | Default `/var/run/secrets/kubernetes.io/serviceaccount`. 195 | """ 196 | account_dir = Path(service_account) 197 | 198 | try: 199 | token = account_dir.joinpath("token").read_text() 200 | namespace = account_dir.joinpath("namespace").read_text() 201 | except FileNotFoundError as e: 202 | raise exceptions.ConfigError(str(e)) 203 | 204 | host = os.environ["KUBERNETES_SERVICE_HOST"] 205 | port = os.environ["KUBERNETES_SERVICE_PORT"] 206 | if ":" in host: # ipv6 207 | host = f"[{host}]" 208 | return cls.from_one( 209 | cluster=Cluster( 210 | server=f"https://{host}:{port}", 211 | certificate_auth=str(account_dir.joinpath("ca.crt")), 212 | ), 213 | user=User(token=token), 214 | namespace=namespace, 215 | ) 216 | 217 | @classmethod 218 | def from_env( 219 | cls, service_account=SERVICE_ACCOUNT, default_config=DEFAULT_KUBECONFIG 220 | ): 221 | """Attempts to load the configuration automatically looking at the environment and filesystem. 222 | 223 | The method will attempt to load a configuration using the following order: 224 | 225 | * in-cluster config. 226 | * config file defined in `KUBECONFIG` environment variable. 227 | * configuration file present on the default location. 228 | 229 | **Parameters** 230 | 231 | * **service_account**: Allows to override the default service account directory path. 232 | Default `/var/run/secrets/kubernetes.io/serviceaccount`. 233 | * **default_config**: Allows to override the default configuration location. Default `~/.kube/config`. 
234 | """ 235 | try: 236 | return KubeConfig.from_service_account(service_account=service_account) 237 | except exceptions.ConfigError: 238 | return KubeConfig.from_file(os.environ.get("KUBECONFIG", default_config)) 239 | -------------------------------------------------------------------------------- /lightkube/config/models.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, List 2 | from dataclasses import dataclass, field 3 | import tempfile 4 | import base64 5 | 6 | from ..core.dataclasses_dict import DataclassDictMixIn 7 | 8 | 9 | class FileStr(str): 10 | def __new__(cls, data): 11 | if data is None: 12 | return None 13 | 14 | f = tempfile.NamedTemporaryFile() 15 | f.write(base64.b64decode(data)) 16 | f.flush() 17 | file = str.__new__(cls, f.name) 18 | file.handler = f 19 | return file 20 | 21 | def __del__(self): 22 | if self.handler: 23 | self.handler.close() 24 | self.handler = None 25 | 26 | 27 | @dataclass 28 | class Context(DataclassDictMixIn): 29 | cluster: str 30 | user: str = None 31 | namespace: str = None 32 | 33 | 34 | @dataclass 35 | class NameValue(DataclassDictMixIn): 36 | name: str 37 | value: str 38 | 39 | 40 | @dataclass 41 | class UserExec(DataclassDictMixIn): 42 | apiVersion: str 43 | command: str = None 44 | env: List[NameValue] = field(default_factory=list) 45 | args: List[str] = field(default_factory=list) 46 | installHint: str = None 47 | 48 | 49 | @dataclass 50 | class User(DataclassDictMixIn): 51 | exec: UserExec = None 52 | username: str = None 53 | password: str = None 54 | token: str = None 55 | auth_provider: Dict = field(metadata={"json": "auth-provider"}, default=None) 56 | client_cert: str = field(metadata={"json": "client-certificate"}, default=None) 57 | client_cert_data: str = field( 58 | metadata={"json": "client-certificate-data"}, default=None 59 | ) 60 | client_key: str = field(metadata={"json": "client-key"}, default=None) 61 | client_key_data: str = 
field(metadata={"json": "client-key-data"}, default=None) 62 | 63 | 64 | @dataclass 65 | class Cluster(DataclassDictMixIn): 66 | """ 67 | Attributes: 68 | server: the server name 69 | """ 70 | server: str = "http://localhost:8080" 71 | certificate_auth: str = field( 72 | metadata={"json": "certificate-authority"}, default=None 73 | ) 74 | certificate_auth_data: str = field( 75 | metadata={"json": "certificate-authority-data"}, default=None 76 | ) 77 | insecure: bool = field(metadata={"json": "insecure-skip-tls-verify"}, default=False) 78 | -------------------------------------------------------------------------------- /lightkube/core/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gtsystem/lightkube/ddeb2928c3de93b48e13933e5ad07d0ed7b115fc/lightkube/core/__init__.py -------------------------------------------------------------------------------- /lightkube/core/dataclasses_dict.py: -------------------------------------------------------------------------------- 1 | import typing 2 | from typing import Union 3 | from datetime import datetime 4 | import dataclasses as dc 5 | 6 | fromisoformat = datetime.fromisoformat 7 | 8 | 9 | from .typing_extra import get_args, get_origin 10 | 11 | 12 | def to_datetime(string): 13 | return fromisoformat(string.replace("Z", "+00:00")) 14 | 15 | 16 | def from_datetime(dt): 17 | return dt.isoformat().replace("+00:00", "Z") 18 | 19 | 20 | class ConverterFunc(typing.NamedTuple): 21 | from_json_type: typing.Callable 22 | to_json_type: typing.Callable 23 | 24 | 25 | TYPE_CONVERTERS = { 26 | datetime: ConverterFunc(from_json_type=to_datetime, to_json_type=from_datetime) 27 | } 28 | 29 | EMPTY_DICT = {} 30 | 31 | 32 | class Converter(typing.NamedTuple): 33 | is_list: bool 34 | supp_kw: bool 35 | func: typing.Callable 36 | 37 | def __call__(self, value, kw): 38 | if not self.supp_kw: 39 | kw = EMPTY_DICT 40 | if self.is_list: 41 | f = self.func 42 | return 
def nohop(x, kw):
    """Identity converter: return *x* untouched (*kw* is ignored).

    Used for fields whose type needs no JSON conversion at all.
    """
    return x


def is_dataclass_json(cls):
    """Return True if *cls* is a dataclass that also mixes in DataclassDictMixIn."""
    return dc.is_dataclass(cls) and issubclass(cls, DataclassDictMixIn)


NoneType = type(None)


def _remove_optional(tp):
    """Unwrap ``Optional[X]`` (i.e. ``Union[X, None]``) to ``X``.

    NOTE(review): only the two-argument form with ``None`` second is
    unwrapped; any other union passes through unchanged.
    """
    if get_origin(tp) is Union:
        args = get_args(tp)
        if args[1] is NoneType:
            return args[0]
    return tp


def get_type_hints(cl):
    """Like ``typing.get_type_hints`` but with ``Optional`` wrappers stripped."""
    types = typing.get_type_hints(cl)
    return {k: _remove_optional(v) for k, v in types.items()}


def extract_types(cls, is_to=True):
    """Yield ``(field_name, converter, default)`` for each dataclass field of *cls*.

    *is_to* selects the conversion direction (serialization vs
    deserialization). Fields that need no conversion are yielded with the
    identity converter ``nohop`` in the to-dict direction and skipped
    entirely in the from-dict direction.
    """
    func_name = "to_json_type" if is_to else "from_json_type"
    method_name = "to_dict" if is_to else "from_dict"
    types = get_type_hints(cls)
    for field in dc.fields(cls):
        k = field.name
        t = types[k]

        # Unwrap List[X] so the converter can be applied element-wise.
        if get_origin(t) is list:
            is_list = True
            t = get_args(t)[0]
        else:
            is_list = False

        if is_dataclass_json(t):
            # Nested model: delegate to its own to_dict/from_dict.
            yield k, Converter(
                is_list=is_list, supp_kw=True, func=getattr(t, method_name)
            ), field.default
        elif t in TYPE_CONVERTERS:
            # Scalar type with a registered converter.
            yield k, Converter(
                is_list=is_list,
                supp_kw=False,
                func=getattr(TYPE_CONVERTERS[t], func_name),
            ), field.default
        else:
            if is_to:
                yield k, nohop, field.default


class LazyAttribute:
    """Descriptor that converts a raw (JSON) value on first attribute access.

    The converted value is cached on the instance via ``setattr``, so the
    conversion cost is paid at most once per attribute (see
    ``DataclassDictMixIn.__setattr__``, which also drops the raw value).
    """

    def __init__(self, key, convert):
        self.key = key
        self.convert = convert

    def __get__(self, instance, owner):
        value = instance._lazy_values[self.key]
        if value is not None:
            value = self.convert(value, instance._lazy_kwargs)
        # Caching via setattr removes the pending entry from _lazy_values.
        setattr(instance, self.key, value)
        return value


class DataclassDictMixIn:
    """Mixin adding ``from_dict``/``to_dict`` (de)serialization to dataclasses.

    Conversion tables are computed once per class on first use (``_setup``)
    and cached in the class attributes below.
    """

    # Per-class caches, filled lazily by _setup():
    _late_init_from: typing.List = None  # [(field, converter)] used by from_dict
    _late_init_to: typing.List = None  # [(field, converter, default)] used by to_dict
    _json_to_prop: typing.Dict = None  # JSON key -> python attribute name
    _prop_to_json: typing.Dict = None  # python attribute name -> JSON key
    _valid_params: typing.Set = None  # accepted constructor parameter names

    def __setattr__(self, name, value):
        # An explicit assignment supersedes any pending raw lazy value,
        # otherwise LazyAttribute would later re-convert stale data.
        if name in getattr(self, "_lazy_values", {}):
            del self._lazy_values[name]
        self.__dict__[name] = value

    @classmethod
    def _setup(cls):
        # Build the per-class conversion caches. Idempotent: guarded by
        # _late_init_from being None.
        if cls._late_init_from is None:
            cls._late_init_from = list(t[:2] for t in extract_types(cls, is_to=False))
            # Install descriptors so conversion happens on first access.
            for k, convert in cls._late_init_from:
                setattr(cls, k, LazyAttribute(k, convert))
            cls._prop_to_json = {
                field.name: field.metadata["json"]
                for field in dc.fields(cls)
                if "json" in field.metadata
            }
            cls._json_to_prop = {v: k for k, v in cls._prop_to_json.items()}
            cls._late_init_to = list(extract_types(cls, is_to=True))
            cls._valid_params = {f.name for f in dc.fields(cls)}

    @classmethod
    def from_dict(cls, d, lazy=True):
        """Build an instance of *cls* from the JSON-like dict *d*.

        JSON names are mapped back to python attribute names and unknown
        keys are silently dropped. With ``lazy=True`` nested values are
        kept raw and converted only on first attribute access.
        """
        cls._setup()
        kwargs = dict(lazy=lazy)
        params = cls._valid_params
        valid_d = {}
        transform = cls._json_to_prop.get
        for k, v in d.items():
            k = transform(k, k)
            if k in params:
                valid_d[k] = v
        obj = cls(**valid_d)
        if lazy:
            obj._lazy_values = {}
            obj._lazy_kwargs = kwargs
            # Move the raw values off the instance so the LazyAttribute
            # descriptors on the class take over until first access.
            for k, _ in cls._late_init_from:
                obj._lazy_values[k] = getattr(obj, k)
                delattr(obj, k)
        else:
            # Eager mode: convert every non-None value in place.
            d = obj.__dict__
            for k, convert in cls._late_init_from:
                if d[k] is not None:
                    d[k] = convert(d[k], kwargs)
        return obj

    def to_dict(self, dict_factory=dict):
        """Serialize to a dict, omitting fields equal to their default.

        Values still pending in ``_lazy_values`` are read from there
        instead of triggering the lazy conversion.
        """
        self._setup()
        kwargs = dict(dict_factory=dict_factory)
        result = []
        lazy_attr = getattr(self, "_lazy_values", None)
        key_transform = self._prop_to_json.get
        for k, conv_f, default in self._late_init_to:
            if lazy_attr is not None and k in lazy_attr:
                value = lazy_attr[k]
            else:
                value = getattr(self, k)
            if value == default:
                continue
            value = conv_f(value, kwargs)
            if value is not None:
                result.append((key_transform(k, k), value))
        return dict_factory(result)
"""
Exceptions.
"""

import httpx

from .internal_models import meta_v1


class ConfigError(Exception):
    """Raised for errors related to the kubernetes configuration."""


class NotReadyError(Exception):
    """Raised when a piece of information is requested before it is available."""

    def __init__(self, name: str, message: str) -> None:
        super().__init__()
        self.name = name
        self.message = message

    def __str__(self) -> str:
        return f"{self.name} is not ready yet: {self.message}"


class ApiError(httpx.HTTPStatusError):
    """HTTP status error enriched with the kubernetes `Status` of the response."""

    status: "meta_v1.Status"

    def __init__(
        self, request: httpx.Request = None, response: httpx.Response = None
    ) -> None:
        # The API server describes the failure with a meta/v1 Status document.
        self.status = meta_v1.Status.from_dict(response.json())
        super().__init__(self.status.message, request=request, response=response)


class LoadResourceError(Exception):
    """Raised when a resource class cannot be located or loaded."""


class ObjectDeleted(Exception):
    """Raised when a watched object disappears unexpectedly."""

    def __init__(self, name):
        self.name = name

    def __str__(self):
        return f"{self.name} was unexpectedly deleted"


class ConditionError(Exception):
    """Raised when an object reports one of the specified failure conditions."""

    def __init__(self, name, messages):
        self.name = name
        self.messages = messages

    def __str__(self):
        return f"{self.name} has failure condition(s): {'; '.join(self.messages)}"
import time
from typing import (
    AsyncIterable,
    Type,
    Any,
    Dict,
    Union,
    Iterator,
    AsyncIterator,
    Tuple,
    TypeVar,
    Iterable,
    Optional,
)
import dataclasses
from dataclasses import dataclass
import json
import asyncio

import httpx

from . import resource as r
from ..config.kubeconfig import KubeConfig, SingleConfig, DEFAULT_KUBECONFIG
from ..config import client_adapter
from .exceptions import ApiError, NotReadyError
from ..types import OnErrorAction, OnErrorHandler, on_error_raise, PatchType


ALL_NS = "*"


def transform_exception(e: httpx.HTTPError):
    """Promote an httpx status error carrying a JSON body to an `ApiError`.

    Any other httpx error is returned unchanged.

    Fixes over the previous version: the Content-Type header is read with
    ``.get()`` (a missing header used to raise KeyError while we were
    already handling an exception) and only the media type is compared, so
    responses with parameters such as ``application/json; charset=utf-8``
    are still recognized.
    """
    if isinstance(e, httpx.HTTPStatusError):
        content_type = e.response.headers.get("Content-Type", "")
        # Compare the bare media type, ignoring any ";"-separated parameters.
        if content_type.split(";")[0].strip().lower() == "application/json":
            return ApiError(request=e.request, response=e.response)
    return e


# Maps the lightkube verb names to the HTTP method used to implement them.
METHOD_MAPPING = {
    "delete": "DELETE",
    "deletecollection": "DELETE",
    "get": "GET",
    "global_list": "GET",
    "global_watch": "GET",
    "list": "GET",
    "patch": "PATCH",
    "post": "POST",
    "put": "PUT",
    "watch": "GET",
}


@dataclass
class BasicRequest:
    """All the pieces needed to build one API server request."""

    method: str  # HTTP method (already mapped via METHOD_MAPPING)
    url: str  # path relative to the API server base URL
    response_type: Any  # resource class used to decode the response, or None
    params: Dict[str, str] = dataclasses.field(default_factory=dict)
    data: Any = None  # JSON-serializable request body
    headers: Dict[str, str] = None


class WatchDriver:
    """Tracks the resourceVersion across reconnects of a watch request."""

    def __init__(self, br: BasicRequest, build_request, lazy):
        self._version = br.params.get("resourceVersion")
        self._convert = br.response_type.from_dict
        self._br = br
        self._build_request = build_request
        self._lazy = lazy

    def get_request(self, timeout):
        """Build the next watch request, resuming from the last seen version."""
        br = self._br
        if self._version is not None:
            br.params["resourceVersion"] = self._version
        return self._build_request(br.method, br.url, params=br.params, timeout=timeout)

    def process_one_line(self, line):
        """Decode one watch stream line into an (event_type, resource) pair."""
        line = json.loads(line)
        tp = line["type"]
        obj = line["object"]
        # Remember the version so a reconnect resumes where we left off.
        self._version = obj["metadata"]["resourceVersion"]
        return tp, self._convert(obj, lazy=self._lazy)
class ListIterable(Iterable[T]):
    """Iterable over the items of a (possibly chunked) list call which also
    exposes the `resourceVersion` observed while iterating."""

    _resourceVersion: Optional[str] = None

    def __init__(self, inner_iter: Iterator[Tuple[str, Iterator[T]]]) -> None:
        self._inner_iter = inner_iter

    @property
    def resourceVersion(self) -> str:
        """Returns the resource version at which the collection was constructed.
        Note: this property is only available after the iteration started and will raise NotReadyError otherwise
        """
        if not self._resourceVersion:
            raise NotReadyError(
                "resourceVersion", "only available after the iteration started"
            )
        return self._resourceVersion

    def __iter__(self) -> Iterator[T]:
        for version, items in self._inner_iter:
            self._resourceVersion = version
            yield from items


class ListAsyncIterable(AsyncIterable[T]):
    """Async counterpart of `ListIterable`."""

    _resourceVersion: Optional[str] = None

    def __init__(self, inner_iter: AsyncIterator[Tuple[str, Iterator[T]]]) -> None:
        self._inner_iter = inner_iter

    @property
    def resourceVersion(self) -> str:
        """Returns the resource version at which the collection was constructed.
        Note: this property is only available after the iteration started and will raise NotReadyError otherwise
        """
        if not self._resourceVersion:
            raise NotReadyError(
                "resourceVersion", "only available after the iteration started"
            )
        return self._resourceVersion

    async def __aiter__(self) -> AsyncIterator[T]:
        async for version, items in self._inner_iter:
            self._resourceVersion = version
            for element in items:
                yield element
class GenericClient:
    """Sync/async-agnostic core of the lightkube clients.

    Turns resource classes/objects into `BasicRequest`s and decodes HTTP
    responses back into resource models. The concrete subclasses
    (GenericSyncClient / GenericAsyncClient) provide the transport only.
    """

    # Factory for the underlying httpx-based adapter; the async subclass
    # overrides this with the async adapter.
    AdapterClient = staticmethod(client_adapter.Client)

    def __init__(
        self,
        config: Union[SingleConfig, KubeConfig] = None,
        namespace: str = None,
        timeout: httpx.Timeout = None,
        lazy=True,
        trust_env: bool = True,
        field_manager: str = None,
        dry_run: bool = False,
        transport: Union[httpx.BaseTransport, httpx.AsyncBaseTransport] = None,
        proxy: str = None,
    ):
        self._timeout = httpx.Timeout(10) if timeout is None else timeout
        # Watches are long lived: reuse the regular timeouts but drop the
        # read timeout so an idle stream is not torn down.
        self._watch_timeout = httpx.Timeout(self._timeout)
        self._watch_timeout.read = None
        self._lazy = lazy
        # Config resolution order: explicit config > environment lookup
        # (when trust_env) > default kubeconfig file; a KubeConfig is
        # narrowed to its selected SingleConfig.
        if config is None and trust_env:
            config = KubeConfig.from_env().get()
        elif config is None and not trust_env:
            config = KubeConfig.from_file(DEFAULT_KUBECONFIG).get()
        elif isinstance(config, KubeConfig):
            config = config.get()

        self.config = config
        self._client = self.AdapterClient(
            config, timeout, trust_env=trust_env, transport=transport, proxy=proxy
        )
        self._field_manager = field_manager
        self._dry_run = dry_run
        # Default namespace falls back to the one from the kube config.
        self.namespace = namespace if namespace else config.namespace

    def prepare_request(
        self,
        method,
        res: Type[r.Resource] = None,
        obj=None,
        name=None,
        namespace=None,
        watch: bool = False,
        params: dict = None,
        headers: dict = None,
    ) -> BasicRequest:
        """Translate a lightkube verb + resource into a `BasicRequest`.

        Raises ValueError for any combination the resource does not
        support (wrong verb, missing name/obj, namespace mismatch, ...).
        """
        # Drop None-valued entries so they don't end up on the query string.
        if params is not None:
            params = {k: v for k, v in params.items() if v is not None}
        else:
            params = {}
        if headers is not None:
            headers = {k: v for k, v in headers.items() if v is not None}
        data = None
        if res is None:
            if obj is None:
                raise ValueError(
                    "At least a resource or an instance of a resource need to be provided"
                )
            res = obj.__class__

        namespaced = issubclass(res, (r.NamespacedResource, r.NamespacedSubResource))

        # namespace == ALL_NS ("*") means list/watch across all namespaces,
        # which maps onto the dedicated global_* verbs.
        if namespace == ALL_NS:
            if not issubclass(res, r.NamespacedResourceG):
                raise ValueError(f"Class {res} doesn't support global {method}")
            if method not in ("list", "watch"):
                raise ValueError(
                    "Only methods 'list' and 'watch' can be called for all namespaces"
                )
            real_method = "global_watch" if watch else "global_" + method
        else:
            real_method = "watch" if watch else method

        api_info = r.api_info(res)
        if real_method not in api_info.verbs:
            if watch:
                raise ValueError(f"Resource '{res.__name__}' is not watchable")
            else:
                raise ValueError(
                    f"method '{method}' not supported by resource '{res.__name__}'"
                )

        if watch:
            params["watch"] = "true"

        # Subresources are addressed through their parent resource's path.
        if api_info.parent is None:
            base = api_info.resource
        else:
            base = api_info.parent

        # Core group lives under /api, everything else under /apis/<group>.
        if base.group == "":
            path = ["api", base.version]
        else:
            path = ["apis", base.group, base.version]

        if namespaced and namespace != ALL_NS:
            # For writes, the namespace embedded in the object metadata must
            # agree with (or provide) the namespace argument.
            if method in ("post", "put") and obj.metadata.namespace is not None:
                if namespace is None:
                    namespace = obj.metadata.namespace
                elif namespace != obj.metadata.namespace:
                    raise ValueError(
                        f"The namespace value '{namespace}' differ from the "
                        f"namespace in the object metadata '{obj.metadata.namespace}'"
                    )
            if namespace is None:
                namespace = self.namespace
            path.extend(["namespaces", namespace])

        if method in ("post", "put", "patch"):
            # Client-wide fieldManager/dryRun defaults, overridable per call.
            if self._field_manager is not None and "fieldManager" not in params:
                params["fieldManager"] = self._field_manager
            if self._dry_run is True and "dryRun" not in params:
                params["dryRun"] = "All"
            if (
                method == "patch"
                and headers["Content-Type"] == PatchType.APPLY.value
                and "fieldManager" not in params
            ):
                raise ValueError(
                    'Parameter "field_manager" is required for PatchType.APPLY'
                )
            if obj is None:
                raise ValueError("obj is required for post, put or patch")

            # A non-Resource patch body (e.g. a raw dict/list for JSON patch)
            # is sent verbatim.
            if method == "patch" and not isinstance(obj, r.Resource):
                data = obj
            else:
                data = obj.to_dict()
                # The following block, ensures that apiVersion and kind are always set.
                # this is needed as k8s fails if this data are not provided for objects derived by CRDs (Issue #27)
                if "apiVersion" not in data:
                    data["apiVersion"] = api_info.resource.api_version
                if "kind" not in data:
                    data["kind"] = api_info.resource.kind

        path.append(api_info.plural)
        if method in ("delete", "get", "patch", "put") or api_info.action:
            # put may take the name from the object metadata.
            if name is None and method == "put":
                name = obj.metadata.name
            if name is None:
                raise ValueError("resource name not defined")
            path.append(name)

        if api_info.action:
            path.append(api_info.action)

        http_method = METHOD_MAPPING[method]
        if http_method == "DELETE":
            # DELETE responses are not decoded into the resource model.
            res = None

        return BasicRequest(
            method=http_method,
            url="/".join(path),
            params=params,
            response_type=res,
            data=data,
            headers=headers,
        )

    @staticmethod
    def raise_for_status(resp):
        """Raise, converting httpx errors to ApiError where possible."""
        try:
            resp.raise_for_status()
        except httpx.HTTPError as e:
            raise transform_exception(e)

    def build_adapter_request(self, br: BasicRequest):
        # Turn a BasicRequest into a concrete httpx request.
        return self._client.build_request(
            br.method, br.url, params=br.params, json=br.data, headers=br.headers
        )

    def convert_to_resource(self, res: Type[r.Resource], item: dict) -> r.Resource:
        """Decode one JSON object, filling in apiVersion/kind if omitted
        (list responses leave them out of the individual items)."""
        resource_def = r.api_info(res).resource
        item.setdefault("apiVersion", resource_def.api_version)
        item.setdefault("kind", resource_def.kind)
        return res.from_dict(item, lazy=self._lazy)

    def handle_response(self, method, resp, br):
        """Validate and decode a response.

        For "list" returns ``(continue?, resourceVersion, items-generator)``
        and stores the continue token back on *br* for the next chunk; for
        other methods returns the decoded resource (or None).
        """
        self.raise_for_status(resp)
        res = br.response_type
        if res is None:
            # TODO: delete/deletecollection actions normally return a Status object, we may want to return it as well
            return
        data = resp.json()
        if method == "list":
            if "metadata" in data and data["metadata"].get("continue"):
                cont = True
                br.params["continue"] = data["metadata"]["continue"]
            else:
                cont = False
            try:
                rv = data["metadata"]["resourceVersion"]
            except KeyError:
                rv = None
            return (
                cont,
                rv,
                (self.convert_to_resource(res, obj) for obj in data["items"]),
            )
        else:
            if res is not None:
                return self.convert_to_resource(res, data)
class GenericSyncClient(GenericClient):
    """Synchronous transport on top of `GenericClient`."""

    def send(self, req, stream=False):
        return self._client.send(req, stream=stream)

    def watch(self, br: BasicRequest, on_error: OnErrorHandler = on_error_raise):
        """Generator yielding (event_type, resource) pairs from a watch.

        Reconnects in a loop; *on_error* decides per failure whether to
        raise, stop, or retry (optionally after a sleep).
        """
        wd = WatchDriver(br, self._client.build_request, self._lazy)
        err_count = 0
        while True:
            req = wd.get_request(timeout=self._watch_timeout)
            resp = self.send(req, stream=True)
            try:
                resp.raise_for_status()
                err_count = 0  # a successful response resets the error counter
                for line in resp.iter_lines():
                    yield wd.process_one_line(line)
            except Exception as e:
                err_count += 1
                handle_error = on_error(e, err_count)
                if handle_error.action is OnErrorAction.RAISE:
                    raise
                if handle_error.action is OnErrorAction.STOP:
                    break
                if handle_error.sleep > 0:
                    time.sleep(handle_error.sleep)
                continue

    def request(
        self,
        method,
        res: Type[r.Resource] = None,
        obj=None,
        name=None,
        namespace=None,
        watch: bool = False,
        headers: dict = None,
        params: dict = None,
    ) -> Any:
        """Build, send and decode a single API request."""
        br = self.prepare_request(
            method, res, obj, name, namespace, watch, headers=headers, params=params
        )
        req = self.build_adapter_request(br)
        resp = self.send(req)
        return self.handle_response(method, resp, br)

    def list_chunks(self, br: BasicRequest) -> Iterator[Tuple[str, Iterator]]:
        """Yield (resourceVersion, items) per chunk, following the server's
        `continue` token until the listing is exhausted."""
        cont = True
        while cont:
            req = self.build_adapter_request(br)
            resp = self.send(req)
            cont, rv, chunk = self.handle_response("list", resp, br)
            yield rv, chunk

    def list(self, br: BasicRequest) -> ListIterable:
        return ListIterable(self.list_chunks(br))
class GenericAsyncClient(GenericClient):
    """Asynchronous transport on top of `GenericClient`."""

    AdapterClient = staticmethod(client_adapter.AsyncClient)

    async def send(self, req, stream=False):
        return await self._client.send(req, stream=stream)

    async def watch(self, br: BasicRequest, on_error: OnErrorHandler = on_error_raise):
        """Async generator yielding (event_type, resource) pairs from a watch.

        Reconnects in a loop; *on_error* decides per failure whether to
        raise, stop, or retry (optionally after a sleep). The streamed
        response is always closed before reconnecting.
        """
        wd = WatchDriver(br, self._client.build_request, self._lazy)
        err_count = 0
        while True:
            req = wd.get_request(timeout=self._watch_timeout)
            resp = await self.send(req, stream=True)
            try:
                resp.raise_for_status()
                err_count = 0  # a successful response resets the error counter
                async for line in resp.aiter_lines():
                    yield wd.process_one_line(line)
            except Exception as e:
                err_count += 1
                handle_error = on_error(e, err_count)
                if handle_error.action is OnErrorAction.RAISE:
                    raise
                if handle_error.action is OnErrorAction.STOP:
                    break
                if handle_error.sleep > 0:
                    await asyncio.sleep(handle_error.sleep)
                continue
            finally:
                # Release the streamed connection on every loop exit path.
                await resp.aclose()

    async def request(
        self,
        method,
        res: Type[r.Resource] = None,
        obj=None,
        name=None,
        namespace=None,
        watch: bool = False,
        headers: dict = None,
        params: dict = None,
    ) -> Any:
        """Build, send and decode a single API request."""
        br = self.prepare_request(
            method, res, obj, name, namespace, watch, headers=headers, params=params
        )
        req = self.build_adapter_request(br)
        resp = await self.send(req)
        return self.handle_response(method, resp, br)

    async def list_chunks(
        self, br: BasicRequest
    ) -> AsyncIterator[Tuple[str, Iterator]]:
        """Yield (resourceVersion, items) per chunk, following the server's
        `continue` token until the listing is exhausted."""
        cont = True
        while cont:
            req = self.build_adapter_request(br)
            resp = await self.send(req)
            cont, rv, chunk = self.handle_response("list", resp, br)
            yield rv, chunk

    def list(self, br: BasicRequest) -> ListAsyncIterable:
        return ListAsyncIterable(self.list_chunks(br))

    async def close(self):
        """Close the underlying httpx client."""
        await self._client.aclose()
from typing import NamedTuple, List, Optional, Type, Union
from dataclasses import dataclass


class ResourceDef(NamedTuple):
    """Identity of a resource: API group, version and kind."""

    group: str
    version: str
    kind: str

    @property
    def api_version(self):
        """The apiVersion string: ``<group>/<version>``, or the bare
        version for the core (empty) group."""
        if self.group:
            return f"{self.group}/{self.version}"
        return self.version


@dataclass
class ApiInfo:
    """API metadata attached to a resource class."""

    resource: ResourceDef  # identity of this resource
    plural: str  # collection name used in URLs
    verbs: List[str]  # supported lightkube verbs
    parent: Optional[ResourceDef] = None  # set for subresources
    action: str = None  # subresource action (e.g. "status", "scale")


class Resource:
    """Base class of every kubernetes resource model."""

    _api_info: ApiInfo


def api_info(res: Union[Resource, Type[Resource]]):
    """Return the `ApiInfo` attached to a resource class or instance."""
    return res._api_info


class NamespacedResource(Resource):
    """A resource that lives inside a namespace."""


class NamespacedSubResource(Resource):
    """A subresource of a namespaced resource."""


class GlobalResource(Resource):
    """A cluster-scoped resource."""


class NamespacedResourceG(NamespacedResource):
    """A namespaced resource that can also be listed/watched globally."""


class GlobalSubResource(Resource):
    """A subresource of a cluster-scoped resource."""
import importlib
from typing import Optional, Type, TypeVar, Union

from lightkube.core import resource as res
from lightkube.core.exceptions import LoadResourceError

AnyResource = Union[res.NamespacedResource, res.GlobalResource]
AnyResourceType = Type[AnyResource]
AnyResourceTypeVar = TypeVar("AnyResourceTypeVar", bound=AnyResourceType)


def _load_internal_resource(version, kind):
    """Import a standard resource class from ``lightkube.resources``."""
    if "/" in version:
        group, version_n = version.split("/")
        # Strip the ".k8s.io" suffix used by several built-in API groups.
        if group.endswith(".k8s.io"):
            group = group[:-7]
        module_name = "_".join([group.replace(".", "_"), version_n])
    else:
        # No group -> core API.
        module_name = f"core_{version}"

    module = importlib.import_module(f"lightkube.resources.{module_name.lower()}")
    try:
        return getattr(module, kind)
    except AttributeError:
        raise LoadResourceError(
            f"Cannot find resource kind '{kind}' in module {module.__name__}"
        )


def _maybe_internal(version):
    """Heuristic: could this apiVersion belong to a built-in resource?

    Built-in groups either have no group at all, end in ".k8s.io", or
    contain no dot (unlike CRD groups, which use a full domain).
    """
    if "/" not in version:
        return True
    group, _, _ = version.partition("/")
    return group.endswith(".k8s.io") or "." not in group


class ResourceRegistry:
    """Resource Registry used to load standard resources or to register custom resources"""

    _registry: dict

    def __init__(self):
        self._registry = {}

    def register(self, resource: AnyResourceTypeVar) -> AnyResourceTypeVar:
        """Register a custom resource

        **parameters**

        * **resource** - Resource class to register.

        **returns** The `resource` class provided
        """
        info = resource._api_info
        rd = info.resource
        version = f"{rd.group}/{rd.version}" if rd.group else rd.version
        res_key = (version, rd.kind)

        existing = self._registry.get(res_key)
        if existing is resource:
            # Same class already registered: nothing to do.
            return existing
        if existing is not None:
            raise ValueError(
                f"Another class for resource '{info.resource.kind}' is already registered"
            )

        self._registry[res_key] = resource
        return resource

    def clear(self):
        """Clear the registry from all registered resources"""
        self._registry.clear()

    def get(self, version: str, kind: str) -> Optional[Type[AnyResource]]:
        """Get a resource from the registry matching the given `version` and `kind`.

        **parameters**

        * **version** - Version of the resource as defined in the kubernetes definition. Example `example.com/v1`
        * **kind** - Resource kind. Example `CronJob`

        **returns** A `resource` class or `None` if there is no match in the registry.
        """
        return self._registry.get((version, kind))

    def load(self, version, kind) -> Optional[Type[AnyResource]]:
        """Load a standard resource from `lightkube.resources` given `version` and `kind`.
        This method look up the registry first and import the resource from the module only if it's not available there.

        * **version** - Version of the resource as defined in the kubernetes definition. Example `apps/v1`
        * **kind** - Resource kind. Example `Pod`

        **returns** A `resource` class if the resource is found. Otherwise an exception is raised
        """
        cached = self.get(version, kind)
        if cached is not None:
            return cached

        # Not registered yet: attempt an import from lightkube.resources.
        if _maybe_internal(version):
            try:
                return self.register(_load_internal_resource(version, kind))
            except ImportError:
                pass

        raise LoadResourceError(
            f"Cannot find resource {kind} of group {version}. "
            "If using a CRD, ensure a generic resource is defined."
        )
from typing import Dict, Union, List
from collections.abc import Iterable
from lightkube import operators

FIELDS_SUPPORT = ("equal", "not_equal", "not_in")
FIELDS_SUPPORT_STR = ", ".join(f'"{fs}"' for fs in FIELDS_SUPPORT)


def build_selector(pairs: Union[List, Dict], for_fields=False):
    """Encode (key, value) selector pairs into the kubernetes selector syntax.

    Plain values are promoted to operators: `None` -> exists, `str` ->
    equal, any other iterable -> in_. With ``for_fields=True`` only the
    operators supported by field selectors are accepted, and `not_in` is
    expanded into a series of `!=` terms.
    """
    encoded = []
    items = pairs if isinstance(pairs, list) else pairs.items()
    for key, value in items:
        # Promote bare values to their implicit operator.
        if value is None:
            value = operators.exists()
        elif isinstance(value, str):
            value = operators.equal(value)
        elif isinstance(value, Iterable):
            value = operators.in_(value)

        if not isinstance(value, operators.Operator):
            raise ValueError(
                f"selector value '{value}' should be str, None, Iterable or instance of operator"
            )

        if for_fields and value.op_name not in FIELDS_SUPPORT:
            raise ValueError(
                f"parameter 'fields' only support operators {FIELDS_SUPPORT_STR}"
            )

        if for_fields and value.op_name == "not_in":
            # Field selectors have no set syntax: emit one "!=" per item.
            for member in value.value:
                encoded.append(operators.not_equal(member).encode(key))
        else:
            encoded.append(value.encode(key))
    return ",".join(encoded)
from typing import List
from ..core import resource as r


def sort_objects(
    objs: List[r.Resource], by: str = "kind", reverse: bool = False
) -> List[r.Resource]:
    """Sorts a list of resource objects by a sorting schema, returning a new list

    **parameters**

    * **objs** - list of resource objects to be sorted
    * **by** - *(optional)* sorting schema. Possible values:
        * `'kind'` - sorts by kind, ranking objects in an order that is suitable for batch-applying
          many resources. For example, Namespaces and ServiceAccounts are sorted ahead of
          ClusterRoleBindings or Pods that might use them. The reverse of this order is suitable
          for batch-deleting.
          See _kind_rank_function for full details on sorting
    * **reverse** - *(optional)* if `True`, sorts in reverse order

    **raises** ValueError if *by* names an unknown schema.
    """
    if by == "kind":
        objs = sorted(objs, key=_kind_rank_function, reverse=reverse)
    else:
        raise ValueError(f"Unknown sorting schema: {by}")
    return objs


# Rank used for any kind not listed in RANK_ORDER (sorts last).
UNKNOWN_ITEM_SORT_VALUE = 1000
RANK_ORDER = {
    "CustomResourceDefinition": 10,
    "Namespace": 20,
    "Secret": 31,
    "ServiceAccount": 32,
    "PersistentVolume": 33,
    "PersistentVolumeClaim": 34,
    "ConfigMap": 35,
    "Role": 41,
    "ClusterRole": 42,
    "RoleBinding": 43,
    "ClusterRoleBinding": 44,
}


def _kind_rank_function(obj: r.Resource) -> int:
    """
    Returns an integer rank based on an objects .kind

    Fix: the parameter annotation previously claimed ``List[r.Resource]``,
    but this is a `sorted` key function and receives a single resource.

    Ranking is set to order kinds by:
    * CRDs
    * Namespaces
    * Things that might be referenced by pods (Secret, ServiceAccount, PVs/PVCs, ConfigMap)
    * RBAC
        * Roles and ClusterRoles
        * RoleBindings and ClusterRoleBindings
    * Everything else (Pod, Deployment, ...)
    """
    return RANK_ORDER.get(obj.kind, UNKNOWN_ITEM_SORT_VALUE)
56 | """ 57 | return RANK_ORDER.get(obj.kind, UNKNOWN_ITEM_SORT_VALUE) 58 | -------------------------------------------------------------------------------- /lightkube/core/typing_extra.py: -------------------------------------------------------------------------------- 1 | """This module provides a compatibility layer for functions in typing that appeared on 3.8""" 2 | 3 | import collections.abc 4 | import sys 5 | 6 | if sys.version_info[:2] > (3, 7): 7 | from typing import get_args, get_origin 8 | 9 | else: 10 | 11 | def get_origin(tp): 12 | if hasattr(tp, "__origin__"): 13 | return tp.__origin__ 14 | return None 15 | 16 | def get_args(tp): 17 | if hasattr(tp, "__args__"): 18 | res = tp.__args__ 19 | if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis: 20 | res = (list(res[:-1]), res[-1]) 21 | return res 22 | return () 23 | -------------------------------------------------------------------------------- /lightkube/exceptions.py: -------------------------------------------------------------------------------- 1 | from .core.exceptions import ApiError, NotReadyError, ConfigError, LoadResourceError, ObjectDeleted, ConditionError 2 | 3 | __all__ = [ 4 | "ApiError", 5 | "NotReadyError", 6 | "ConfigError", 7 | "LoadResourceError", 8 | "ConditionError", 9 | "ObjectDeleted", 10 | ] 11 | -------------------------------------------------------------------------------- /lightkube/generic_resource.py: -------------------------------------------------------------------------------- 1 | from typing import Type, Any, Optional, overload 2 | 3 | from .core import resource as res 4 | from .core.client import Client 5 | from .core.async_client import AsyncClient 6 | from .core.internal_models import meta_v1, autoscaling_v1 7 | from .core.internal_resources import apiextensions 8 | from .core.resource_registry import resource_registry 9 | 10 | 11 | __all__ = [ 12 | "async_load_in_cluster_generic_resources", 13 | "create_global_resource", 14 | 
"create_namespaced_resource", 15 | "create_resources_from_crd", 16 | "get_generic_resource", 17 | "load_in_cluster_generic_resources", 18 | ] 19 | 20 | 21 | def get_generic_resource(version, kind): 22 | """Query generic resources already defined using one of the other methods described in this module or via 23 | `codecs.load_all_yaml(..., create_resources_for_crds=True)` 24 | 25 | **Parameters** 26 | 27 | * **version** `str` - Resource version including the API group. Example `stable.example.com/v1` 28 | * **kind** `str` - Resource kind. Example: `CronTab` 29 | 30 | **returns** class representing the generic resource or `None` if it's not found 31 | """ 32 | resource = resource_registry.get(version, kind) 33 | if resource is None or not issubclass( 34 | resource, (GenericGlobalResource, GenericNamespacedResource) 35 | ): 36 | return None 37 | return resource 38 | 39 | 40 | class Generic(dict): 41 | @overload 42 | def __init__( 43 | self, 44 | apiVersion: str = None, 45 | kind: str = None, 46 | metadata: meta_v1.ObjectMeta = None, 47 | **kwargs, 48 | ): 49 | pass 50 | 51 | def __init__(self, *args, **kwargs): 52 | super().__init__(*args, **kwargs) 53 | 54 | @property 55 | def apiVersion(self) -> str: 56 | return self.get("apiVersion") 57 | 58 | @property 59 | def kind(self) -> str: 60 | return self.get("kind") 61 | 62 | @property 63 | def status(self) -> str: 64 | return self.get("status") 65 | 66 | @property 67 | def metadata(self) -> Optional[meta_v1.ObjectMeta]: 68 | meta = self.get("metadata") 69 | if meta is None: 70 | return None 71 | elif isinstance(meta, meta_v1.ObjectMeta): 72 | return meta 73 | return meta_v1.ObjectMeta.from_dict(meta) 74 | 75 | def __getattr__(self, item): 76 | if item.startswith("_"): 77 | raise AttributeError(f"{item} not found") 78 | return self.get(item) 79 | 80 | @classmethod 81 | def from_dict(cls, d: dict, lazy=True): 82 | return cls(d) 83 | 84 | def to_dict(self, dict_factory=dict): 85 | d = dict_factory(self) 86 | if "metadata" 
in d and isinstance(d["metadata"], meta_v1.ObjectMeta): 87 | d["metadata"] = d["metadata"].to_dict(dict_factory) 88 | return d 89 | 90 | 91 | def create_api_info(group, version, kind, plural, verbs=None) -> res.ApiInfo: 92 | if verbs is None: 93 | verbs = [ 94 | "delete", 95 | "deletecollection", 96 | "get", 97 | "global_list", 98 | "global_watch", 99 | "list", 100 | "patch", 101 | "post", 102 | "put", 103 | "watch", 104 | ] 105 | return res.ApiInfo( 106 | resource=res.ResourceDef(group, version, kind), plural=plural, verbs=verbs 107 | ) 108 | 109 | 110 | class GenericGlobalScale(res.GlobalSubResource, autoscaling_v1.Scale): 111 | pass 112 | 113 | 114 | class GenericGlobalStatus(res.GlobalSubResource, Generic): 115 | pass 116 | 117 | 118 | class GenericNamespacedScale(res.NamespacedResourceG, autoscaling_v1.Scale): 119 | pass 120 | 121 | 122 | class GenericNamespacedStatus(res.NamespacedResourceG, Generic): 123 | pass 124 | 125 | 126 | def _create_subresource(main_class, parent_info: res.ApiInfo, action): 127 | class TmpName(main_class): 128 | _api_info = res.ApiInfo( 129 | resource=( 130 | parent_info.resource 131 | if action == "status" 132 | else res.ResourceDef("autoscaling", "v1", "Scale") 133 | ), 134 | parent=parent_info.resource, 135 | plural=parent_info.plural, 136 | verbs=["get", "patch", "put"], 137 | action=action, 138 | ) 139 | 140 | TmpName.__name__ = TmpName.__qualname__ = ( 141 | f"{parent_info.resource.kind}{action.capitalize()}" 142 | ) 143 | return TmpName 144 | 145 | 146 | class GenericGlobalResource(res.GlobalResource, Generic): 147 | Scale: Type[GenericGlobalScale] 148 | Status: Type[GenericGlobalStatus] 149 | 150 | 151 | class GenericNamespacedResource(res.NamespacedResourceG, Generic): 152 | Scale: Type[GenericNamespacedScale] 153 | Status: Type[GenericNamespacedStatus] 154 | 155 | 156 | def _api_info_signature(api_info: res.ApiInfo, namespaced: bool): 157 | return ( 158 | namespaced, 159 | api_info.plural, 160 | tuple(api_info.verbs) if 
def _api_info_signature(api_info: res.ApiInfo, namespaced: bool):
    """Reduce an ApiInfo to a comparable tuple used to detect conflicting redefinitions."""
    return (
        namespaced,
        api_info.plural,
        tuple(api_info.verbs) if api_info.verbs else None,
    )


def _create_resource(namespaced, group, version, kind, plural, verbs=None) -> Any:
    """Create (or return the already registered) generic resource class.

    If a resource with the same ``group/version`` and ``kind`` is already
    registered, it is returned as-is when compatible.

    **raises** `ValueError` - when the existing registration differs in scope,
    plural or verbs.
    """
    model = resource_registry.get(f"{group}/{version}", kind)
    api_info = create_api_info(group, version, kind, plural, verbs=verbs)
    signature = _api_info_signature(api_info, namespaced)

    if model is not None:
        curr_namespaced = issubclass(model, res.NamespacedResource)
        curr_signature = _api_info_signature(model._api_info, curr_namespaced)
        if curr_signature != signature:
            raise ValueError(
                f"Resource {kind} already created but with different signature"
            )
        return model

    if namespaced:
        main, status, scale = (
            GenericNamespacedResource,
            GenericNamespacedStatus,
            GenericNamespacedScale,
        )
    else:
        main, status, scale = (
            GenericGlobalResource,
            GenericGlobalStatus,
            GenericGlobalScale,
        )

    class TmpName(main):
        # Reuse the ApiInfo computed above instead of rebuilding an identical one.
        _api_info = api_info

        Scale = _create_subresource(scale, _api_info, action="scale")
        Status = _create_subresource(status, _api_info, action="status")

    TmpName.__name__ = TmpName.__qualname__ = kind
    return resource_registry.register(TmpName)


def create_global_resource(
    group: str, version: str, kind: str, plural: str, verbs=None
) -> Type[GenericGlobalResource]:
    """Create a new class representing a global resource with the provided specifications.

    **Parameters**

    * **group** `str` - API group of the resource. Example `stable.example.com`.
    * **version** `str` - API group version. Example `v1`.
    * **kind** `str` - Resource name. Example `Job`.
    * **plural** `str` - Resource collection name. Example `jobs`.
    * **verbs** `list` - (optional) Verbs supported by the resource API; defaults
      to the full set of verbs.

    **returns** Subclass of `GenericGlobalResource`.
    """
    return _create_resource(False, group, version, kind, plural, verbs=verbs)


def create_namespaced_resource(
    group: str, version: str, kind: str, plural: str, verbs=None
) -> Type[GenericNamespacedResource]:
    """Create a new class representing a namespaced resource with the provided specifications.

    **Parameters**

    * **group** `str` - API group of the resource. Example `stable.example.com`.
    * **version** `str` - API group version. Example `v1`.
    * **kind** `str` - Resource name. Example `Job`.
    * **plural** `str` - Resource collection name. Example `jobs`.
    * **verbs** `list` - (optional) Verbs supported by the resource API; defaults
      to the full set of verbs.

    **returns** Subclass of `GenericNamespacedResource`.
    """
    return _create_resource(True, group, version, kind, plural, verbs=verbs)


def load_in_cluster_generic_resources(client: Client):
    """Loads all in-cluster CustomResourceDefinitions as generic resources.

    Once loaded, generic resources can be obtained from `generic_resource.get_generic_resource()`,
    or used implicitly such as when using `codecs.load_all_yaml()`.

    **Parameters**

    * **client** `Client` - Lightkube Client to use to load the CRDs.
    """
    crds = client.list(apiextensions.CustomResourceDefinition)
    for crd in crds:
        create_resources_from_crd(crd)
259 | """ 260 | crds = client.list(apiextensions.CustomResourceDefinition) 261 | async for crd in crds: 262 | create_resources_from_crd(crd) 263 | 264 | 265 | def create_resources_from_crd(crd: apiextensions.CustomResourceDefinition): 266 | """Creates a generic resource for each version in a CustomResourceDefinition.""" 267 | if crd.spec.scope == "Namespaced": 268 | creator = create_namespaced_resource 269 | elif crd.spec.scope == "Cluster": 270 | creator = create_global_resource 271 | else: 272 | raise ValueError( 273 | f"Unexpected scope for resource. Expected 'Namespaced' or 'Cluster'," 274 | f" got {crd.spec.scope}" 275 | ) 276 | 277 | for version in crd.spec.versions: 278 | creator(**_crd_to_dict(crd, version.name)) 279 | 280 | 281 | def _crd_to_dict(crd, version_name): 282 | return { 283 | "group": crd.spec.group, 284 | "version": version_name, 285 | "kind": crd.spec.names.kind, 286 | "plural": crd.spec.names.plural, 287 | } 288 | -------------------------------------------------------------------------------- /lightkube/operators.py: -------------------------------------------------------------------------------- 1 | from typing import Iterable 2 | 3 | __all__ = ["in_", "not_in", "exists", "not_exists", "equal", "not_equal"] 4 | 5 | 6 | class Operator: 7 | def __init__(self, op_name: str, op: str, value=None): 8 | self.op = op 9 | self.value = value 10 | self.op_name = op_name 11 | 12 | def encode(self, key): 13 | return f"{key}{self.op}{self.value}" 14 | 15 | 16 | class SequenceOperator(Operator): 17 | def encode(self, key): 18 | return f"{key} {self.op} ({','.join(self.value)})" 19 | 20 | 21 | class BinaryOperator(Operator): 22 | pass 23 | 24 | 25 | class UnaryOperator(Operator): 26 | def encode(self, key): 27 | return f"{self.op}{key}" 28 | 29 | 30 | def in_(values: Iterable) -> SequenceOperator: 31 | return SequenceOperator("in_", "in", sorted(values)) 32 | 33 | 34 | def not_in(values: Iterable) -> SequenceOperator: 35 | return SequenceOperator("not_in", 
"notin", sorted(values)) 36 | 37 | 38 | def exists() -> UnaryOperator: 39 | return UnaryOperator("exists", "") 40 | 41 | 42 | def not_exists() -> UnaryOperator: 43 | return UnaryOperator("not_exists", "!") 44 | 45 | 46 | def equal(value: str) -> BinaryOperator: 47 | return BinaryOperator("equal", "=", value) 48 | 49 | 50 | def not_equal(value: str) -> BinaryOperator: 51 | return BinaryOperator("not_equal", "!=", value) 52 | -------------------------------------------------------------------------------- /lightkube/py.typed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gtsystem/lightkube/ddeb2928c3de93b48e13933e5ad07d0ed7b115fc/lightkube/py.typed -------------------------------------------------------------------------------- /lightkube/types.py: -------------------------------------------------------------------------------- 1 | import enum 2 | from dataclasses import dataclass 3 | import typing 4 | 5 | 6 | class PatchType(enum.Enum): 7 | """ 8 | Attributes: 9 | JSON: Execute a json patch 10 | MERGE: Execute a json merge patch 11 | STRATEGIC: Execute a strategic merge patch 12 | APPLY: Execute a [server side apply](https://kubernetes.io/docs/reference/using-api/server-side-apply/) 13 | """ 14 | JSON = "application/json-patch+json" 15 | MERGE = "application/merge-patch+json" 16 | STRATEGIC = "application/strategic-merge-patch+json" 17 | APPLY = "application/apply-patch+yaml" 18 | 19 | 20 | class CascadeType(enum.Enum): 21 | """ 22 | Attributes: 23 | ORPHAN: orphan the dependents 24 | BACKGROUND: allow the garbage collector to delete the dependents in the background 25 | FOREGROUND: a cascading policy that deletes all dependents in the foreground 26 | """ 27 | ORPHAN = "Orphan" 28 | BACKGROUND = "Background" 29 | FOREGROUND = "Foreground" 30 | 31 | 32 | class OnErrorAction(enum.Enum): 33 | """ 34 | Attributes: 35 | RETRY: Retry to perform the API call again from the last version 36 | STOP: Stop 
silently the iterator 37 | RAISE: Raise the error on the caller scope 38 | """ 39 | RETRY = 0 40 | STOP = 1 41 | RAISE = 2 42 | 43 | 44 | @dataclass 45 | class OnErrorResult: 46 | action: OnErrorAction 47 | sleep: float = 0 48 | 49 | 50 | OnErrorHandler = typing.Callable[[Exception, int], OnErrorResult] 51 | 52 | 53 | def on_error_raise(e: Exception, count: int): 54 | """Raise the error on the caller scope""" 55 | return OnErrorResult(OnErrorAction.RAISE) 56 | 57 | 58 | def on_error_stop(e: Exception, count: int): 59 | """Stop silently the iterator""" 60 | return OnErrorResult(OnErrorAction.STOP) 61 | 62 | 63 | def on_error_retry(e: Exception, count: int): 64 | """Retry to perform the API call again from the last version""" 65 | return OnErrorResult(OnErrorAction.RETRY) 66 | -------------------------------------------------------------------------------- /lightkube/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gtsystem/lightkube/ddeb2928c3de93b48e13933e5ad07d0ed7b115fc/lightkube/utils/__init__.py -------------------------------------------------------------------------------- /lightkube/utils/quantity.py: -------------------------------------------------------------------------------- 1 | import decimal 2 | import re 3 | from typing import Optional, overload 4 | 5 | from ..core.internal_models import core_v1 6 | 7 | ResourceRequirements = core_v1.ResourceRequirements 8 | 9 | MULTIPLIERS = { 10 | # Bytes 11 | "m": (10, -3), # 1000^(-1) (=0.001) 12 | "": (10, 0), # 1000^0 (=1) 13 | "k": (10, 3), # 1000^1 14 | "M": (10, 6), # 1000^2 15 | "G": (10, 9), # 1000^3 16 | "T": (10, 12), # 1000^4 17 | "P": (10, 15), # 1000^5 18 | "E": (10, 18), # 1000^6 19 | "Z": (10, 21), # 1000^7 20 | "Y": (10, 24), # 1000^8 21 | # Bibytes 22 | "Ki": (1024, 1), # 2^10 23 | "Mi": (1024, 2), # 2^20 24 | "Gi": (1024, 3), # 2^30 25 | "Ti": (1024, 4), # 2^40 26 | "Pi": (1024, 5), # 2^50 27 | "Ei": (1024, 6), 
# 2^60 28 | "Zi": (1024, 7), # 2^70 29 | "Yi": (1024, 8), # 2^80 30 | } 31 | 32 | # Pre-calculate multipliers and store as decimals. 33 | MULTIPLIERS = {k: decimal.Decimal(v[0]) ** v[1] for k, v in MULTIPLIERS.items()} 34 | 35 | 36 | def parse_quantity(quantity: Optional[str]) -> Optional[decimal.Decimal]: 37 | """Parse a quantity string into a bare (suffix-less) decimal. 38 | 39 | Kubernetes converts user input to a canonical representation. For example, "0.9Gi" would be converted 40 | to "966367641600m". 41 | This function can be useful for comparing user input to actual values, for example comparing 42 | resource limits between a StatefulSet's template 43 | (`statefulset.spec.template.spec.containers[i].resources`) and a scheduled pod 44 | (`pod.spec.containers[i].resources`) after patching the StatefulSet. 45 | 46 | **Parameters** 47 | 48 | * **quantity** `str` - An str representing a K8s quantity (e.g. "1Gi" or "1G"), per 49 | https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. 50 | 51 | **returns** An instance of `decimal.Decimal` representing the quantity as a bare decimal. 52 | """ 53 | if quantity is None: 54 | # This is useful for comparing e.g. ResourceRequirements.limits.get("cpu"), which can be 55 | # None. 
56 | return None 57 | 58 | pat = re.compile(r"([+-]?\d+(?:[.]\d*)?(?:e[+-]?\d+)?|[.]\d+(?:e[+-]?\d+)?)(.*)") 59 | match = pat.match(quantity) 60 | 61 | if not match: 62 | raise ValueError("Invalid quantity string: '{}'".format(quantity)) 63 | 64 | try: 65 | value = decimal.Decimal(match.group(1)) 66 | except ArithmeticError as e: 67 | raise ValueError("Invalid numerical value") from e 68 | 69 | unit = match.group(2) 70 | 71 | try: 72 | multiplier = MULTIPLIERS[unit] 73 | except KeyError: 74 | raise ValueError("Invalid unit suffix: {}".format(unit)) 75 | 76 | try: 77 | as_decimal = value * multiplier 78 | return as_decimal.quantize(decimal.Decimal("0.001"), rounding=decimal.ROUND_UP) 79 | except ArithmeticError as e: 80 | raise ValueError("Invalid numerical value") from e 81 | 82 | 83 | def _equals_canonically( 84 | first_dict: Optional[dict], second_dict: Optional[dict] 85 | ) -> bool: 86 | """Compare resource dicts such as 'limits' or 'requests'.""" 87 | if first_dict == second_dict: 88 | # This covers two cases: (1) both args are None; (2) both args are identical dicts. 89 | return True 90 | if first_dict and second_dict: 91 | if first_dict.keys() != second_dict.keys(): 92 | # The dicts have different keys, so they cannot possibly be equal 93 | return False 94 | return all( 95 | parse_quantity(first_dict[k]) == parse_quantity(second_dict[k]) 96 | for k in first_dict.keys() 97 | ) 98 | if not first_dict and not second_dict: 99 | # This covers cases such as first=None and second={} 100 | return True 101 | return False 102 | 103 | 104 | @overload 105 | def equals_canonically( 106 | first: ResourceRequirements, second: ResourceRequirements 107 | ) -> bool: ... 108 | 109 | 110 | @overload 111 | def equals_canonically(first: Optional[dict], second: Optional[dict]) -> bool: ... 112 | 113 | 114 | def equals_canonically(first, second): 115 | """Compare two resource requirements for numerical equality. 
@overload
def equals_canonically(
    first: ResourceRequirements, second: ResourceRequirements
) -> bool: ...


@overload
def equals_canonically(first: Optional[dict], second: Optional[dict]) -> bool: ...


def equals_canonically(first, second):
    """Compare two resource requirements for numerical equality.

    Both arguments must be of the same type and can be either:

    - An instance of `core_v1.ResourceRequirements`
    - `Optional[dict]`: representing the "limits" or the "requests" portion of
      `ResourceRequirements`.

    ```python
    >>> equals_canonically({"cpu": "0.6"}, {"cpu": "600m"})
    True

    >>> equals_canonically(
        ResourceRequirements(limits={"cpu": "0.6"}),
        ResourceRequirements(limits={"cpu": "600m"})
    )
    True
    ```

    **Parameters**

    * **first** `ResourceRequirements` or `dict` - The first item to compare.
    * **second** `ResourceRequirements` or `dict` - The second item to compare.

    **returns** True, if both arguments are numerically equal; False otherwise.

    **raises** `TypeError` - when the arguments are not of a supported (matching) type.
    """
    dict_like = (dict, type(None))
    if isinstance(first, dict_like) and isinstance(second, dict_like):
        # Bare 'limits'/'requests' mappings.
        return _equals_canonically(first, second)
    if isinstance(first, ResourceRequirements) and isinstance(
        second, ResourceRequirements
    ):
        # Full requirements objects: both sections must match numerically.
        return all(
            _equals_canonically(getattr(first, section), getattr(second, section))
            for section in ("limits", "requests")
        )
    raise TypeError(
        "unsupported operand type(s) for canonical comparison: '{}' and '{}'".format(
            first.__class__.__name__,
            second.__class__.__name__,
        )
    )
15 | - Reference: 16 | - Client: reference/client.md 17 | - AsyncClient: reference/async_client.md 18 | - Configuration: reference/configuration.md 19 | - Exceptions: reference/exceptions.md 20 | - Types: reference/types.md 21 | - Resources & Models: resources-and-models.md 22 | - Async Usage: async-usage.md 23 | - Utils: utils.md 24 | - Advanced: 25 | - Selectors: selectors.md 26 | - Generic Resources: generic-resources.md 27 | - Custom Resources: custom-resources.md 28 | - Load/Dump Objects: codecs.md 29 | - List-Watch Pattern: list-watch.md 30 | 31 | extra_css: 32 | - css/custom.css 33 | 34 | markdown_extensions: 35 | - admonition 36 | - pymdownx.highlight 37 | - pymdownx.superfences 38 | - markdown_include.include: 39 | base_path: . 40 | 41 | plugins: 42 | - mkdocstrings: 43 | default_handler: python 44 | handlers: 45 | python: 46 | options: 47 | heading_level: 2 48 | show_root_heading: true 49 | show_symbol_type_heading: true 50 | show_signature_annotations: true 51 | signature_crossrefs: true 52 | show_if_no_docstring: true 53 | docstring_section_style: list 54 | filters: ["!^_[^_]"] 55 | merge_init_into_class: true -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | minversion = 6.0 3 | testpaths = 4 | tests 5 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | asyncmock 2 | httpx >= 0.28.1 3 | pytest 4 | pytest-asyncio 5 | respx 6 | PyYAML 7 | lightkube-models >= 1.15.6.1 8 | backports-datetime-fromisoformat;python_version<"3.7" 9 | dataclasses;python_version<"3.7" 10 | -------------------------------------------------------------------------------- /setup-test-env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ -d 
"../lightkube-models" ]; then 4 | SOURCE_DIR=$(python -c "import os.path; print(os.path.realpath('../lightkube-models'))") 5 | else 6 | SOURCE_DIR=$(python -c 'import sysconfig; print(sysconfig.get_paths()["purelib"])') 7 | pip install lightkube-models 8 | fi 9 | rm -f lightkube/models lightkube/resources 10 | ln -s $SOURCE_DIR/lightkube/models lightkube 11 | ln -s $SOURCE_DIR/lightkube/resources lightkube 12 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | from pathlib import Path 3 | 4 | setup( 5 | name='lightkube', 6 | version="0.17.2", 7 | description='Lightweight kubernetes client library', 8 | long_description=Path("README.md").read_text(), 9 | long_description_content_type="text/markdown", 10 | author='Giuseppe Tribulato', 11 | author_email='gtsystem@gmail.com', 12 | license='MIT', 13 | url='https://github.com/gtsystem/lightkube', 14 | packages=['lightkube', 'lightkube.config', 'lightkube.core', 'lightkube.utils'], 15 | package_data={'lightkube': ['py.typed']}, 16 | install_requires=[ 17 | 'lightkube-models >= 1.15.12.0', 18 | 'httpx >= 0.28.1, < 1.0.0', 19 | 'PyYAML' 20 | ], 21 | extras_require={ 22 | "dev": [ 23 | "pytest", 24 | "pytest-asyncio", 25 | "respx" 26 | ] 27 | }, 28 | classifiers=[ 29 | 'Development Status :: 4 - Beta', 30 | 'Intended Audience :: Developers', 31 | 'Intended Audience :: System Administrators', 32 | 'License :: OSI Approved :: MIT License', 33 | 'Programming Language :: Python :: 3.8', 34 | 'Programming Language :: Python :: 3.9', 35 | 'Programming Language :: Python :: 3.10', 36 | 'Programming Language :: Python :: 3.11', 37 | 'Programming Language :: Python :: 3.12', 38 | 'Programming Language :: Python :: 3.13' 39 | ] 40 | ) 41 | -------------------------------------------------------------------------------- /tests/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/gtsystem/lightkube/ddeb2928c3de93b48e13933e5ad07d0ed7b115fc/tests/__init__.py -------------------------------------------------------------------------------- /tests/data/auth_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | echo '{"apiVersion":"client.authentication.k8s.io/v1beta1", "kind":"ExecCredential","status":{"token":"my-bearer-token"}}' 3 | -------------------------------------------------------------------------------- /tests/data/clientkey.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDRLCtJpkXjq5G1 3 | 32sQ2Q0cWguEosNGG6/sYjJlHItAn+STItmf8o3fHTo9eZc+uTCpEYiazqSVOIsA 4 | c5xGW7EOcPjDn9Vsl6Klnd76R/C12mr4aEUkra2cNsdxJhyu51Pj4ifkZMpoJJCI 5 | Nm02Tx66rWWGHG/bUX+m/hnsIRGfaJ8C4NbhLwSRmkyuxn8E2sPDvydx4RVfTYfj 6 | XZCE1aJoNIzJ6towk9BEsI+ACOo3pybJj+wAO/kZg3tpr3i8HEZFE7aZvPJ6KBWx 7 | UVWJSZVPJ1POdGPOLmiDXtGUwJvUcAbw0WHsJIh7nP6mbvyaYNiYGAyVlIFF8PJ+ 8 | cYiXYFxDAgMBAAECggEACIp5KgS0DdvPk1GQGZwDQQkcN0o+FvrCcDCCRkaoGPdu 9 | aeOeZz2MNXQIEMKKPnFpXz3sCgYuCjnI0zflRaowzJooTjSUjl6SsZiEpkuRtJs6 10 | ivIXAKxmzl4ePqyDt1CSyFdPsa+75Ay6KhWu1+zbIFw8LQG0P4xQXg06Gb4v7gZQ 11 | qvmKRKLC/7hcV6X6+s2bOuM6mB7EL319aUklPMoJH8hsWT9Fw8voeyUz5VXLUTs1 12 | JS6TP0FoXEbtg/G06oPzTomLWe+7Aq+g6PG/HflzcjqvJ30wrAdVXNiWfgkjgDxl 13 | 7FBk5GcPe08i5jMoUdxtCfer+U1lBGiLz+1MsPUU4QKBgQD0Qa1otFrMj1IZfrA6 14 | ofZ0O5JTvPygQuu+2bLPSYH9c+hJCPf50JA/w5n/nxkFIr0yFhm+EnHSrMrgpglf 15 | A2Sj+sAKvmajlIYgvjKmXxUpWX5FZ2nWh/o43a7jPHT/mee59V1tTsgCoamhGMy1 16 | n1XHVvRVV1RjCQu8DEC7PP2VBQKBgQDbOqz/YMW2bqLichCokYU3YIacSHgin3ls 17 | jQ7FQYTTFQ8GwDg7iZBbZ6vkNDIIYTRY671QMXU8dTUuMk/rZfrO0trz2L8RDrUg 18 | 5p7epDgcx4mLtRIwjcLJE7T4Cwh/Zchhk+mzGn75MC53ckSqwfSYfMQ/YI8bYwMK 19 | 
AxzYO/xupwKBgQCceIJc48SS7HEckfLU7LJTzWG0sQlopNYegZkxfxZ9xcWVG+C3 20 | MOtnXaeGgGXny1RGBLBi+a/e1QB2Hwc7zZGoFlb1yvk3a0rtKMqLl7eXsJPaZCAB 21 | 5UPfL+v799u/bdlrYAqEnoY0YVmoMJna84Jg24xUK0iM1NumkHbbO3v6/QKBgCsJ 22 | z+Cq3OW9vph6EC8nsmF2v7Z6u6sAZ9QZtSaggDT4U2Td46w8i2yGY8Z/QLtIagBy 23 | 902BCCUAVZpmIi5ybNShrH5mtMvieUimPdYzoxhzS9tzhsila/IRvltbvyVTlA0j 24 | /qM8tmSxQs4MTtK/FQfCprxSdoXH4Fbc2ZLR4/LNAoGAA9OlHzTdXpAKQgXZG0Pf 25 | N9NWdvIWWuKB4l3U2e6CU7jUmT2GH3lknMecZo95sjgI2dZ3pqF0swslKtXUvfgF 26 | +hNbLaLzkRLDls/Q/tQoIyM82LMWFI3rMQPXbbL1dSrVErUft7FwVwco+2Civu0z 27 | CAn6r0KHHHiBb4Scgk6NfoI= 28 | -----END PRIVATE KEY----- 29 | -------------------------------------------------------------------------------- /tests/data/clientreq.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDjzCCAnegAwIBAgIUHA9xE40tm3defw2dP7AaSfiJD18wDQYJKoZIhvcNAQEL 3 | BQAwVzELMAkGA1UEBhMCREUxDzANBgNVBAgMBkJlcmxpbjEPMA0GA1UEBwwGQmVy 4 | bGluMRAwDgYDVQQKDAdFeGFtcGxlMRQwEgYDVQQDDAtleGFtcGxlLmNvbTAeFw0y 5 | NDEyMTQxNzE1MDdaFw0yNTAxMTMxNzE1MDdaMFcxCzAJBgNVBAYTAkRFMQ8wDQYD 6 | VQQIDAZCZXJsaW4xDzANBgNVBAcMBkJlcmxpbjEQMA4GA1UECgwHRXhhbXBsZTEU 7 | MBIGA1UEAwwLZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK 8 | AoIBAQDRLCtJpkXjq5G132sQ2Q0cWguEosNGG6/sYjJlHItAn+STItmf8o3fHTo9 9 | eZc+uTCpEYiazqSVOIsAc5xGW7EOcPjDn9Vsl6Klnd76R/C12mr4aEUkra2cNsdx 10 | Jhyu51Pj4ifkZMpoJJCINm02Tx66rWWGHG/bUX+m/hnsIRGfaJ8C4NbhLwSRmkyu 11 | xn8E2sPDvydx4RVfTYfjXZCE1aJoNIzJ6towk9BEsI+ACOo3pybJj+wAO/kZg3tp 12 | r3i8HEZFE7aZvPJ6KBWxUVWJSZVPJ1POdGPOLmiDXtGUwJvUcAbw0WHsJIh7nP6m 13 | bvyaYNiYGAyVlIFF8PJ+cYiXYFxDAgMBAAGjUzBRMB0GA1UdDgQWBBQc6QMw+fKF 14 | zVTfSTgvORkkEr6+OzAfBgNVHSMEGDAWgBQc6QMw+fKFzVTfSTgvORkkEr6+OzAP 15 | BgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQC7BroYEqjM+zZGbSwl 16 | IvuclDQDFRmRQRA7RKiicptJfUDxmOzdk/COh2wFZ1+mCVNamwexLdSETooDd/9s 17 | Qm0LaR3KzpUer+6WYq2y8ijxT2wzsr/Yejq8v1GkejezuedzbMvdihsDvJI2YUuo 18 | 
3yHlTWifLse7dn6Ru07P7hnxTfFrav/z6H5PtuXKJ76FUbqLZieRc6Eve5gwsSTc 19 | h9+DjpdVD8rz88PurLNFA1C72ZU384W/xnYhNICatQ26DiHO5cWbHPdwVhOenM/F 20 | Ow5qn/FcM3T5lOamgakdflcn/sFxXJjOj2aWnWRnYOp/zea3WCpYYIR59SSCa1EF 21 | xxDd 22 | -----END CERTIFICATE----- 23 | -------------------------------------------------------------------------------- /tests/data/example-def-null.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | --- 3 | -------------------------------------------------------------------------------- /tests/data/example-def-with-lists.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: SecretList 3 | items: 4 | - apiVersion: v1 5 | kind: Secret 6 | metadata: 7 | name: "nginxsecret" 8 | namespace: "default" 9 | type: kubernetes.io/tls 10 | data: 11 | tls.crt: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURIekNDQWdW...Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0t" 12 | tls.key: "LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURB...TkJnVW1VbGc9Ci0tLS0tRU5EIFBSSVZB" 13 | --- 14 | apiVersion: v1 15 | kind: List 16 | items: 17 | - apiVersion: myapp.com/v1 18 | kind: Mydb 19 | metadata: 20 | name: bla 21 | a: xx 22 | b: yy 23 | --- 24 | apiVersion: v1 25 | kind: ServiceList 26 | items: 27 | - apiVersion: v1 28 | kind: ServiceList 29 | metadata: {} 30 | items: 31 | - apiVersion: v1 32 | kind: Service 33 | metadata: 34 | name: my-nginx 35 | labels: 36 | run: my-nginx 37 | spec: 38 | type: NodePort 39 | ports: 40 | - port: 8080 41 | targetPort: 80 42 | protocol: TCP 43 | name: http 44 | - port: 443 45 | protocol: TCP 46 | name: https 47 | selector: 48 | run: my-nginx 49 | --- 50 | apiVersion: apps/v1 51 | kind: DeploymentList 52 | items: 53 | - apiVersion: apps/v1 54 | kind: Deployment 55 | metadata: 56 | name: my-nginx 57 | spec: 58 | selector: 59 | matchLabels: 60 | run: my-nginx 61 | replicas: 1 62 | template: 63 | metadata: 64 | labels: 65 | run: my-nginx 66 | spec: 67 | volumes: 
68 | - name: secret-volume 69 | secret: 70 | secretName: nginxsecret 71 | - name: configmap-volume 72 | configMap: 73 | name: nginxconfigmap 74 | containers: 75 | - name: nginxhttps 76 | image: bprashanth/nginxhttps:1.0 77 | ports: 78 | - containerPort: 443 79 | - containerPort: 80 80 | volumeMounts: 81 | - mountPath: /etc/nginx/ssl 82 | name: secret-volume 83 | - mountPath: /etc/nginx/conf.d 84 | name: configmap-volume 85 | -------------------------------------------------------------------------------- /tests/data/example-def-with-nulls.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: "nginxsecret" 5 | namespace: "default" 6 | type: kubernetes.io/tls 7 | data: 8 | tls.crt: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURIekNDQWdW...Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0t" 9 | tls.key: "LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURB...TkJnVW1VbGc9Ci0tLS0tRU5EIFBSSVZB" 10 | --- 11 | --- 12 | apiVersion: myapp.com/v1 13 | kind: Mydb 14 | metadata: 15 | name: bla 16 | a: xx 17 | b: yy 18 | --- 19 | --- 20 | apiVersion: v1 21 | kind: Service 22 | metadata: 23 | name: my-nginx 24 | labels: 25 | run: my-nginx 26 | spec: 27 | type: NodePort 28 | ports: 29 | - port: 8080 30 | targetPort: 80 31 | protocol: TCP 32 | name: http 33 | - port: 443 34 | protocol: TCP 35 | name: https 36 | selector: 37 | run: my-nginx 38 | --- 39 | --- 40 | apiVersion: apps/v1 41 | kind: Deployment 42 | metadata: 43 | name: my-nginx 44 | spec: 45 | selector: 46 | matchLabels: 47 | run: my-nginx 48 | replicas: 1 49 | template: 50 | metadata: 51 | labels: 52 | run: my-nginx 53 | spec: 54 | volumes: 55 | - name: secret-volume 56 | secret: 57 | secretName: nginxsecret 58 | - name: configmap-volume 59 | configMap: 60 | name: nginxconfigmap 61 | containers: 62 | - name: nginxhttps 63 | image: bprashanth/nginxhttps:1.0 64 | ports: 65 | - containerPort: 443 66 | - containerPort: 80 67 | volumeMounts: 68 | - 
mountPath: /etc/nginx/ssl 69 | name: secret-volume 70 | - mountPath: /etc/nginx/conf.d 71 | name: configmap-volume 72 | -------------------------------------------------------------------------------- /tests/data/example-def.tmpl: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: "nginxsecret" 5 | namespace: "default" 6 | type: kubernetes.io/tls 7 | data: 8 | tls.crt: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURIekNDQWdlZ0F3SUJBZ0lKQUp5M3lQK0pzMlpJTUEwR0NTcUdTSWIzRFFFQkJRVUFNQ1l4RVRBUEJnTlYKQkFNVENHNW5hVzU0YzNaak1SRXdEd1lEVlFRS0V3aHVaMmx1ZUhOMll6QWVGdzB4TnpFd01qWXdOekEzTVRKYQpGdzB4T0RFd01qWXdOekEzTVRKYU1DWXhFVEFQQmdOVkJBTVRDRzVuYVc1NGMzWmpNUkV3RHdZRFZRUUtFd2h1CloybHVlSE4yWXpDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBSjFxSU1SOVdWM0IKMlZIQlRMRmtobDRONXljMEJxYUhIQktMSnJMcy8vdzZhU3hRS29GbHlJSU94NGUrMlN5ajBFcndCLzlYTnBwbQppeW1CL3JkRldkOXg5UWhBQUxCZkVaTmNiV3NsTVFVcnhBZW50VWt1dk1vLzgvMHRpbGhjc3paenJEYVJ4NEo5Ci82UVRtVVI3a0ZTWUpOWTVQZkR3cGc3dlVvaDZmZ1Voam92VG42eHNVR0M2QURVODBpNXFlZWhNeVI1N2lmU2YKNHZpaXdIY3hnL3lZR1JBRS9mRTRqakxCdmdONjc2SU90S01rZXV3R0ljNDFhd05tNnNTSzRqYUNGeGpYSnZaZQp2by9kTlEybHhHWCtKT2l3SEhXbXNhdGp4WTRaNVk3R1ZoK0QrWnYvcW1mMFgvbVY0Rmo1NzV3ajFMWVBocWtsCmdhSXZYRyt4U1FVQ0F3RUFBYU5RTUU0d0hRWURWUjBPQkJZRUZPNG9OWkI3YXc1OUlsYkROMzhIYkduYnhFVjcKTUI4R0ExVWRJd1FZTUJhQUZPNG9OWkI3YXc1OUlsYkROMzhIYkduYnhFVjdNQXdHQTFVZEV3UUZNQU1CQWY4dwpEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRVhTMW9FU0lFaXdyMDhWcVA0K2NwTHI3TW5FMTducDBvMm14alFvCjRGb0RvRjdRZnZqeE04Tzd2TjB0clcxb2pGSW0vWDE4ZnZaL3k4ZzVaWG40Vm8zc3hKVmRBcStNZC9jTStzUGEKNmJjTkNUekZqeFpUV0UrKzE5NS9zb2dmOUZ3VDVDK3U2Q3B5N0M3MTZvUXRUakViV05VdEt4cXI0Nk1OZWNCMApwRFhWZmdWQTRadkR4NFo3S2RiZDY5eXM3OVFHYmg5ZW1PZ05NZFlsSUswSGt0ejF5WU4vbVpmK3FqTkJqbWZjCkNnMnlwbGQ0Wi8rUUNQZjl3SkoybFIrY2FnT0R4elBWcGxNSEcybzgvTHFDdnh6elZPUDUxeXdLZEtxaUMwSVEKQ0I5T2wwWW5scE9UNEh1b2hSUzBPOStlMm9KdFZsNUIyczRpbDlhZ3RTVXFxUlU9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K" 9 | 
tls.key: "LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRQ2RhaURFZlZsZHdkbFIKd1V5eFpJWmVEZWNuTkFhbWh4d1NpeWF5N1AvOE9ta3NVQ3FCWmNpQ0RzZUh2dGtzbzlCSzhBZi9WemFhWm9zcApnZjYzUlZuZmNmVUlRQUN3WHhHVFhHMXJKVEVGSzhRSHA3VkpMcnpLUC9QOUxZcFlYTE0yYzZ3MmtjZUNmZitrCkU1bEVlNUJVbUNUV09UM3c4S1lPNzFLSWVuNEZJWTZMMDUrc2JGQmd1Z0ExUE5JdWFubm9UTWtlZTRuMG4rTDQKb3NCM01ZUDhtQmtRQlAzeE9JNHl3YjREZXUraURyU2pKSHJzQmlIT05Xc0RadXJFaXVJMmdoY1kxeWIyWHI2UAozVFVOcGNSbC9pVG9zQngxcHJHclk4V09HZVdPeGxZZmcvbWIvNnBuOUYvNWxlQlkrZStjSTlTMkQ0YXBKWUdpCkwxeHZzVWtGQWdNQkFBRUNnZ0VBZFhCK0xkbk8ySElOTGo5bWRsb25IUGlHWWVzZ294RGQwci9hQ1Zkank4dlEKTjIwL3FQWkUxek1yall6Ry9kVGhTMmMwc0QxaTBXSjdwR1lGb0xtdXlWTjltY0FXUTM5SjM0VHZaU2FFSWZWNgo5TE1jUHhNTmFsNjRLMFRVbUFQZytGam9QSFlhUUxLOERLOUtnNXNrSE5pOWNzMlY5ckd6VWlVZWtBL0RBUlBTClI3L2ZjUFBacDRuRWVBZmI3WTk1R1llb1p5V21SU3VKdlNyblBESGtUdW1vVlVWdkxMRHRzaG9reUxiTWVtN3oKMmJzVmpwSW1GTHJqbGtmQXlpNHg0WjJrV3YyMFRrdWtsZU1jaVlMbjk4QWxiRi9DSmRLM3QraTRoMTVlR2ZQegpoTnh3bk9QdlVTaDR2Q0o3c2Q5TmtEUGJvS2JneVVHOXBYamZhRGR2UVFLQmdRRFFLM01nUkhkQ1pKNVFqZWFKClFGdXF4cHdnNzhZTjQyL1NwenlUYmtGcVFoQWtyczJxWGx1MDZBRzhrZzIzQkswaHkzaE9zSGgxcXRVK3NHZVAKOWRERHBsUWV0ODZsY2FlR3hoc0V0L1R6cEdtNGFKSm5oNzVVaTVGZk9QTDhPTm1FZ3MxMVRhUldhNzZxelRyMgphRlpjQ2pWV1g0YnRSTHVwSkgrMjZnY0FhUUtCZ1FEQmxVSUUzTnNVOFBBZEYvL25sQVB5VWs1T3lDdWc3dmVyClUycXlrdXFzYnBkSi9hODViT1JhM05IVmpVM25uRGpHVHBWaE9JeXg5TEFrc2RwZEFjVmxvcG9HODhXYk9lMTAKMUdqbnkySmdDK3JVWUZiRGtpUGx1K09IYnRnOXFYcGJMSHBzUVpsMGhucDBYSFNYVm9CMUliQndnMGEyOFVadApCbFBtWmc2d1BRS0JnRHVIUVV2SDZHYTNDVUsxNFdmOFhIcFFnMU16M2VvWTBPQm5iSDRvZUZKZmcraEppSXlnCm9RN3hqWldVR3BIc3AyblRtcHErQWlSNzdyRVhsdlhtOElVU2FsbkNiRGlKY01Pc29RdFBZNS9NczJMRm5LQTQKaENmL0pWb2FtZm1nZEN0ZGtFMXNINE9MR2lJVHdEbTRpb0dWZGIwMllnbzFyb2htNUpLMUI3MkpBb0dBUW01UQpHNDhXOTVhL0w1eSt5dCsyZ3YvUHM2VnBvMjZlTzRNQ3lJazJVem9ZWE9IYnNkODJkaC8xT2sybGdHZlI2K3VuCnc1YytZUXRSTHlhQmd3MUtpbGhFZDBKTWU3cGpUSVpnQWJ0LzVPbnlDak9OVXN2aDJjS2lrQ1Z2dTZsZlBjNkQKckliT2ZIaHhxV0RZK2Q1TGN1YSt2NzJ0RkxhenJsSlBsRz
lOZHhrQ2dZRUF5elIzT3UyMDNRVVV6bUlCRkwzZAp4Wm5XZ0JLSEo3TnNxcGFWb2RjL0d5aGVycjFDZzE2MmJaSjJDV2RsZkI0VEdtUjZZdmxTZEFOOFRwUWhFbUtKCnFBLzVzdHdxNWd0WGVLOVJmMWxXK29xNThRNTBxMmk1NVdUTThoSDZhTjlaMTltZ0FGdE5VdGNqQUx2dFYxdEYKWSs4WFJkSHJaRnBIWll2NWkwVW1VbGc9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K" 10 | --- 11 | apiVersion: myapp.com/v1 12 | kind: Mydb 13 | metadata: 14 | name: bla-{{test}} 15 | a: xx 16 | b: yy 17 | --- 18 | apiVersion: v1 19 | kind: Service 20 | metadata: 21 | name: my-nginx 22 | labels: 23 | run: my-nginx 24 | spec: 25 | type: NodePort 26 | ports: 27 | - port: 8080 28 | targetPort: 80 29 | protocol: TCP 30 | name: http 31 | - port: 443 32 | protocol: TCP 33 | name: https 34 | selector: 35 | run: my-nginx 36 | --- 37 | apiVersion: apps/v1 38 | kind: Deployment 39 | metadata: 40 | name: my-nginx 41 | spec: 42 | selector: 43 | matchLabels: 44 | run: my-nginx 45 | replicas: 1 46 | template: 47 | metadata: 48 | labels: 49 | run: my-nginx 50 | spec: 51 | volumes: 52 | - name: secret-volume 53 | secret: 54 | secretName: nginxsecret 55 | - name: configmap-volume 56 | configMap: 57 | name: nginxconfigmap 58 | containers: 59 | - name: nginxhttps 60 | image: bprashanth/nginxhttps:1.0 61 | ports: 62 | - containerPort: 443 63 | - containerPort: 80 64 | volumeMounts: 65 | - mountPath: /etc/nginx/ssl 66 | name: secret-volume 67 | - mountPath: /etc/nginx/conf.d 68 | name: configmap-volume 69 | -------------------------------------------------------------------------------- /tests/data/example-def.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: "nginxsecret" 5 | namespace: "default" 6 | type: kubernetes.io/tls 7 | data: 8 | tls.crt: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURIekNDQWdW...Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0t" 9 | tls.key: "LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURB...TkJnVW1VbGc9Ci0tLS0tRU5EIFBSSVZB" 10 | --- 11 | apiVersion: myapp.com/v1 12 | kind: Mydb 13 | 
metadata: 14 | name: bla 15 | a: xx 16 | b: yy 17 | --- 18 | apiVersion: v1 19 | kind: Service 20 | metadata: 21 | name: my-nginx 22 | labels: 23 | run: my-nginx 24 | spec: 25 | type: NodePort 26 | ports: 27 | - port: 8080 28 | targetPort: 80 29 | protocol: TCP 30 | name: http 31 | - port: 443 32 | protocol: TCP 33 | name: https 34 | selector: 35 | run: my-nginx 36 | --- 37 | apiVersion: apps/v1 38 | kind: Deployment 39 | metadata: 40 | name: my-nginx 41 | spec: 42 | selector: 43 | matchLabels: 44 | run: my-nginx 45 | replicas: 1 46 | template: 47 | metadata: 48 | labels: 49 | run: my-nginx 50 | spec: 51 | volumes: 52 | - name: secret-volume 53 | secret: 54 | secretName: nginxsecret 55 | - name: configmap-volume 56 | configMap: 57 | name: nginxconfigmap 58 | containers: 59 | - name: nginxhttps 60 | image: bprashanth/nginxhttps:1.0 61 | ports: 62 | - containerPort: 443 63 | - containerPort: 80 64 | volumeMounts: 65 | - mountPath: /etc/nginx/ssl 66 | name: secret-volume 67 | - mountPath: /etc/nginx/conf.d 68 | name: configmap-volume 69 | -------------------------------------------------------------------------------- /tests/data/example-multi-version-crd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | # name must match the spec fields below, and be in the form: . 5 | name: crontabs.example.com 6 | spec: 7 | # group name to use for REST API: /apis// 8 | group: example.com 9 | # list of versions supported by this CustomResourceDefinition 10 | versions: 11 | - name: v1beta1 12 | # Each version can be enabled/disabled by Served flag. 13 | served: true 14 | # One and only one version must be marked as the storage version. 
15 | storage: true 16 | # A schema is required 17 | schema: 18 | openAPIV3Schema: 19 | type: object 20 | properties: 21 | host: 22 | type: string 23 | port: 24 | type: string 25 | - name: v1 26 | served: true 27 | storage: false 28 | schema: 29 | openAPIV3Schema: 30 | type: object 31 | properties: 32 | host: 33 | type: string 34 | port: 35 | type: string 36 | # The conversion section is introduced in Kubernetes 1.13+ with a default value of 37 | # None conversion (strategy sub-field set to None). 38 | conversion: 39 | # None conversion assumes the same schema for all versions and only sets the apiVersion 40 | # field of custom resources to the proper value 41 | strategy: None 42 | # either Namespaced or Cluster 43 | scope: Namespaced 44 | names: 45 | # plural name to be used in the URL: /apis/// 46 | plural: crontabs 47 | # singular name to be used as an alias on the CLI and for display 48 | singular: crontab 49 | # kind is normally the CamelCased singular type. Your resource manifests use this. 
50 | kind: CronTab 51 | # shortNames allow shorter string to match your resource on the CLI 52 | shortNames: 53 | - ct 54 | -------------------------------------------------------------------------------- /tests/test_client_adapter.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import shutil 3 | import ssl 4 | import unittest 5 | from pathlib import Path 6 | from unittest.mock import Mock 7 | from lightkube.config import kubeconfig, client_adapter 8 | from lightkube.config import models 9 | from lightkube import ConfigError 10 | import pytest 11 | import httpx 12 | import asyncio 13 | 14 | BASEDIR = Path("tests") 15 | 16 | 17 | def single_conf(cluster=None, user=None, fname=None): 18 | return kubeconfig.SingleConfig( 19 | context=models.Context(cluster="x"), context_name="x", 20 | cluster=cluster, user=user, fname=fname 21 | ) 22 | 23 | 24 | def test_verify_cluster_insecure(): 25 | cfg = single_conf(cluster=models.Cluster(insecure=True), user=models.User()) 26 | verify = client_adapter.verify_cluster(cfg.cluster, cfg.user, cfg.abs_file) 27 | assert verify.verify_mode is ssl.CERT_NONE 28 | assert not verify.check_hostname 29 | 30 | 31 | def test_verify_cluster_secure(): 32 | cfg = single_conf(cluster=models.Cluster(), user=models.User()) 33 | verify = client_adapter.verify_cluster(cfg.cluster, cfg.user, cfg.abs_file) 34 | assert verify.verify_mode is ssl.CERT_REQUIRED 35 | 36 | 37 | def get_issuer_mata(data: dict): 38 | return {d[0][0]: d[0][1] for d in data['issuer']} 39 | 40 | def test_verify_cluster_ca_path(tmpdir): 41 | tmpdir = Path(tmpdir) 42 | data_dir = Path(__file__).parent.joinpath('data') 43 | shutil.copy(data_dir.joinpath("clientreq.pem"), tmpdir.joinpath("clientreq.pem")) 44 | cluster = models.Cluster(certificate_auth="clientreq.pem") 45 | cfg = single_conf(cluster=cluster, user=models.User(), fname=tmpdir.joinpath("kubeconf")) 46 | verify = client_adapter.verify_cluster(cfg.cluster, cfg.user, 
cfg.abs_file) 47 | assert get_issuer_mata(verify.get_ca_certs()[0])["organizationName"] == "Example" 48 | 49 | # fname not provided 50 | cfg = single_conf(cluster=models.Cluster(certificate_auth="clientreq.pem"), user=models.User()) 51 | with pytest.raises(ConfigError): 52 | client_adapter.verify_cluster(cfg.cluster, cfg.user, cfg.abs_file) 53 | 54 | # cert path absolute 55 | cluster = models.Cluster(certificate_auth=str(data_dir.joinpath("clientreq.pem"))) 56 | verify = client_adapter.verify_cluster(cluster, cfg.user, cfg.abs_file) 57 | assert get_issuer_mata(verify.get_ca_certs()[0])["organizationName"] == "Example" 58 | 59 | 60 | def test_verify_cluster_ca_data(): 61 | data_dir = Path(__file__).parent.joinpath('data') 62 | cert_data = base64.b64encode(data_dir.joinpath("clientreq.pem").read_bytes()).decode("utf8") 63 | 64 | cluster = models.Cluster(certificate_auth_data=cert_data) 65 | cfg = single_conf(cluster=cluster, user=models.User()) 66 | verify = client_adapter.verify_cluster(cfg.cluster, cfg.user, cfg.abs_file) 67 | assert get_issuer_mata(verify.get_ca_certs()[0])["organizationName"] == "Example" 68 | 69 | 70 | def test_user_cert_missing(): 71 | cfg = single_conf(user=models.User()) 72 | assert client_adapter.user_cert(cfg.user, cfg.abs_file) is None 73 | 74 | 75 | def test_user_cert(tmpdir): 76 | tmpdir = Path(tmpdir) 77 | cfg = single_conf(user=models.User(client_cert="a.crt", client_key="a.key"), fname=tmpdir.joinpath("conf")) 78 | certs = client_adapter.user_cert(cfg.user, cfg.abs_file) 79 | assert certs == (tmpdir.joinpath("a.crt"), tmpdir.joinpath("a.key")) 80 | 81 | 82 | def test_user_cert_data(): 83 | cfg = single_conf(user=models.User(client_cert_data="Y2VydA==", client_key_data="a2V5")) 84 | certs = client_adapter.user_cert(cfg.user, cfg.abs_file) 85 | assert Path(certs[0]).read_text() == "cert" 86 | assert Path(certs[1]).read_text() == "key" 87 | 88 | 89 | @unittest.mock.patch('ssl.create_default_context') 90 | def 
test_verify_cluster_ca_and_cert(create_default_context): 91 | data_dir = Path(__file__).parent.joinpath('data') 92 | cluster = models.Cluster(certificate_auth=str(data_dir.joinpath("clientreq.pem"))) 93 | cfg = single_conf(cluster=cluster, user=models.User( 94 | client_cert=str(data_dir.joinpath("clientreq.pem")), 95 | client_key=str(data_dir.joinpath("clientkey.pem")) 96 | )) 97 | verify = client_adapter.verify_cluster(cluster, cfg.user, cfg.abs_file) 98 | assert verify is create_default_context.return_value 99 | create_default_context.assert_called_once_with(cafile=str(data_dir.joinpath("clientreq.pem"))) 100 | create_default_context.return_value.load_cert_chain.assert_called_once_with( 101 | str(data_dir.joinpath("clientreq.pem")), 102 | str(data_dir.joinpath("clientkey.pem")) 103 | ) 104 | 105 | 106 | def test_user_auth_missing(): 107 | assert client_adapter.user_auth(None) is None 108 | 109 | 110 | def test_user_auth_empty(): 111 | assert client_adapter.user_auth(models.User()) is None 112 | 113 | 114 | def test_user_auth_basic(): 115 | auth = client_adapter.user_auth(models.User(username="user", password="psw")) 116 | assert isinstance(auth, httpx.BasicAuth) 117 | m = Mock(headers={}) 118 | next(auth.auth_flow(m)) 119 | assert m.headers["Authorization"] == "Basic dXNlcjpwc3c=" 120 | 121 | 122 | def test_user_auth_bearer(): 123 | auth = client_adapter.user_auth(models.User(token="abcd")) 124 | assert isinstance(auth, client_adapter.BearerAuth) 125 | m = Mock(headers={}) 126 | next(auth.auth_flow(m)) 127 | assert m.headers["Authorization"] == "Bearer abcd" 128 | 129 | 130 | def test_user_auth_provider(): 131 | """Auth provider not supported""" 132 | with pytest.raises(ConfigError): 133 | client_adapter.user_auth(models.User(auth_provider={'x': 1})) 134 | 135 | 136 | def test_user_auth_exec_sync(): 137 | auth_script = str(Path(__file__).parent.joinpath('data', 'auth_script.sh')) 138 | auth = client_adapter.user_auth(models.User(exec=models.UserExec( 139 | 
apiVersion="client.authentication.k8s.io/v1beta1", 140 | command=auth_script, 141 | ))) 142 | assert isinstance(auth, client_adapter.ExecAuth) 143 | m = Mock(headers={}) 144 | next(auth.sync_auth_flow(m)) 145 | assert m.headers["Authorization"] == "Bearer my-bearer-token" 146 | 147 | # call again should cache 148 | m = Mock(headers={}) 149 | flow = auth.sync_auth_flow(m) 150 | next(flow) 151 | assert m.headers["Authorization"] == "Bearer my-bearer-token" 152 | m.headers["Authorization"] = None 153 | 154 | # we pretend the cache is old 155 | flow.send(httpx.Response(status_code=401, request=m)) 156 | assert m.headers["Authorization"] == "Bearer my-bearer-token" 157 | 158 | 159 | def test_user_auth_exec_sync_with_args(): 160 | auth = client_adapter.user_auth(models.User(exec=models.UserExec( 161 | apiVersion="client.authentication.k8s.io/v1beta1", 162 | args=['{"apiVersion":"client.authentication.k8s.io/v1beta1",' 163 | '"kind":"ExecCredential","status":{"token":"my-bearer-token"}}'], 164 | command='echo', 165 | ))) 166 | assert isinstance(auth, client_adapter.ExecAuth) 167 | m = Mock(headers={}) 168 | next(auth.sync_auth_flow(m)) 169 | assert m.headers["Authorization"] == "Bearer my-bearer-token" 170 | 171 | 172 | def test_user_auth_exec_sync_fail(): 173 | auth = client_adapter.user_auth(models.User(exec=models.UserExec( 174 | apiVersion="client.authentication.k8s.io/v1beta1", 175 | command="cp" 176 | ))) 177 | with pytest.raises(ConfigError, match="cp"): 178 | next(auth.sync_auth_flow(Mock(headers={}))) 179 | 180 | 181 | @pytest.mark.asyncio 182 | async def test_user_auth_exec_async(): 183 | auth_script = str(Path(__file__).parent.joinpath('data', 'auth_script.sh')) 184 | auth = client_adapter.user_auth(models.User(exec=models.UserExec( 185 | apiVersion="client.authentication.k8s.io/v1beta1", 186 | command=auth_script, 187 | ))) 188 | 189 | assert isinstance(auth, client_adapter.ExecAuth) 190 | m = Mock(headers={}) 191 | await auth.async_auth_flow(m).__anext__() 
192 | assert m.headers["Authorization"] == "Bearer my-bearer-token" 193 | 194 | # call again should cache 195 | m = Mock(headers={}) 196 | flow = auth.async_auth_flow(m) 197 | await flow.__anext__() 198 | assert m.headers["Authorization"] == "Bearer my-bearer-token" 199 | m.headers["Authorization"] = None 200 | 201 | # we pretend the cache is old 202 | await flow.asend(httpx.Response(status_code=401, request=m)) 203 | assert m.headers["Authorization"] == "Bearer my-bearer-token" 204 | with pytest.raises(StopAsyncIteration): 205 | await flow.__anext__() 206 | 207 | 208 | @pytest.mark.asyncio 209 | async def test_user_auth_exec_async_fail(): 210 | auth = client_adapter.user_auth(models.User(exec=models.UserExec( 211 | apiVersion="client.authentication.k8s.io/v1beta1", 212 | command="cp" 213 | ))) 214 | with pytest.raises(ConfigError, match="cp"): 215 | await auth.async_auth_flow(Mock(headers={})).__anext__() 216 | -------------------------------------------------------------------------------- /tests/test_codecs.py: -------------------------------------------------------------------------------- 1 | import textwrap 2 | from pathlib import Path 3 | from unittest import mock 4 | 5 | import pytest 6 | import yaml 7 | 8 | from lightkube import codecs 9 | from lightkube.resources.core_v1 import ConfigMap 10 | from lightkube.resources.rbac_authorization_v1 import Role 11 | from lightkube.models.meta_v1 import ObjectMeta 12 | from lightkube import generic_resource as gr 13 | from lightkube import LoadResourceError 14 | from lightkube.codecs import resource_registry 15 | 16 | data_dir = Path(__file__).parent.joinpath('data') 17 | 18 | 19 | @pytest.fixture(autouse=True) 20 | def cleanup_registry(): 21 | """Cleanup the registry before each test""" 22 | yield 23 | resource_registry.clear() 24 | 25 | 26 | def test_from_dict(): 27 | config_map = codecs.from_dict({ 28 | 'apiVersion': 'v1', 29 | 'kind': 'ConfigMap', 30 | 'metadata': {'name': 'config-name', 'labels': {'label1': 
'value1'}}, 31 | 'data': { 32 | 'file1.txt': 'some content here', 33 | 'file2.txt': 'some other content' 34 | } 35 | }) 36 | assert isinstance(config_map, ConfigMap) 37 | assert config_map.kind == 'ConfigMap' 38 | assert config_map.apiVersion == 'v1' 39 | assert config_map.metadata.name == 'config-name' 40 | assert config_map.metadata.labels['label1'] == 'value1' 41 | assert config_map.data['file1.txt'] == 'some content here' 42 | assert config_map.data['file2.txt'] == 'some other content' 43 | 44 | role = codecs.from_dict({ 45 | 'apiVersion': 'rbac.authorization.k8s.io/v1', 46 | 'kind': 'Role', 47 | 'metadata': {'name': 'read-pod'}, 48 | 'rules': [{ 49 | 'apiGroup': '', 50 | 'resources': ['pods'], 51 | 'verbs': ['get','watch', 'list'] 52 | }] 53 | }) 54 | assert isinstance(role, Role) 55 | assert role.kind == 'Role' 56 | assert role.apiVersion == 'rbac.authorization.k8s.io/v1' 57 | assert role.metadata.name == 'read-pod' 58 | assert role.rules[0].resources == ['pods'] 59 | 60 | 61 | def test_from_dict_wrong_model(): 62 | # provided argument must actually be a dict 63 | with pytest.raises(LoadResourceError, match='.*not a dict'): 64 | codecs.from_dict([]) 65 | 66 | # apiVersion and kind are required 67 | with pytest.raises(LoadResourceError, match=".*key 'apiVersion' missing"): 68 | codecs.from_dict({ 69 | 'kind': 'ConfigMap', 70 | 'metadata': {'name': 'config-name'}, 71 | }) 72 | 73 | 74 | def test_from_dict_generic_res(): 75 | Mydb = gr.create_namespaced_resource('myapp.com', 'v1', 'Mydb', 'mydbs') 76 | db = codecs.from_dict({ 77 | 'apiVersion': 'myapp.com/v1', 78 | 'kind': 'Mydb', 79 | 'metadata': {'name': 'db1'}, 80 | 'key': {'a': 'b', 'c': 'd'} 81 | }) 82 | assert isinstance(db, Mydb) 83 | assert db.kind == 'Mydb' 84 | assert db.apiVersion == 'myapp.com/v1' 85 | assert db.metadata.name == 'db1' 86 | assert 'key' in db 87 | assert db['key'] == {'a': 'b', 'c': 'd'} 88 | 89 | # Try with a generic resource with .k8s.io version 90 | # 
https://github.com/gtsystem/lightkube/issues/18 91 | version = "testing.k8s.io/v1" 92 | kind = "Testing" 93 | group, version_n = version.split("/") 94 | Testing = gr.create_namespaced_resource(group=group, version=version_n, kind=kind, 95 | plural=f"{kind.lower()}s") 96 | testing = codecs.from_dict({ 97 | 'apiVersion': version, 98 | 'kind': kind, 99 | 'metadata': {'name': 'testing1'}, 100 | 'key': {'a': 'b', 'c': 'd'} 101 | }) 102 | assert isinstance(testing, Testing) 103 | assert testing.kind == kind 104 | assert testing.apiVersion == version 105 | assert testing.metadata.name == 'testing1' 106 | assert 'key' in testing 107 | assert testing['key'] == {'a': 'b', 'c': 'd'} 108 | 109 | 110 | def test_from_dict_not_found(): 111 | with pytest.raises(LoadResourceError): 112 | codecs.from_dict({'apiVersion': 'myapp2.com/v1', 'kind': 'Mydb'}) 113 | 114 | with pytest.raises(LoadResourceError): 115 | codecs.from_dict({'apiVersion': 'v1', 'kind': 'Missing'}) 116 | 117 | with pytest.raises(LoadResourceError): 118 | codecs.from_dict({'apiVersion': 'extra/v1', 'kind': 'Missing'}) 119 | 120 | # Try with an undefined generic resource with .k8s.io version 121 | # https://github.com/gtsystem/lightkube/issues/18 122 | with pytest.raises(LoadResourceError): 123 | codecs.from_dict({'apiVersion': "undefined.k8s.io/v1", 'kind': 'Missing'}) 124 | 125 | @pytest.mark.parametrize( 126 | "yaml_file", 127 | ( 128 | "example-def.yaml", 129 | "example-def-with-nulls.yaml", 130 | "example-def-with-lists.yaml" 131 | ) 132 | ) 133 | def test_load_all_yaml_static(yaml_file): 134 | gr.create_namespaced_resource('myapp.com', 'v1', 'Mydb', 'mydbs') 135 | objs = list(codecs.load_all_yaml(data_dir.joinpath(yaml_file).read_text())) 136 | kinds = [o.kind for o in objs] 137 | 138 | assert kinds == ['Secret', 'Mydb', 'Service', 'Deployment'] 139 | 140 | with data_dir.joinpath('example-def.yaml').open() as f: 141 | objs = list(codecs.load_all_yaml(f)) 142 | kinds = [o.kind for o in objs] 143 | 144 | assert 
kinds == ['Secret', 'Mydb', 'Service', 'Deployment'] 145 | 146 | 147 | def test_load_all_yaml_template(): 148 | gr.create_namespaced_resource('myapp.com', 'v1', 'Mydb', 'mydbs') 149 | objs = list(codecs.load_all_yaml( 150 | data_dir.joinpath('example-def.tmpl').read_text(), 151 | context={'test': 'xyz'}) 152 | ) 153 | kinds = [o.kind for o in objs] 154 | 155 | assert kinds == ['Secret', 'Mydb', 'Service', 'Deployment'] 156 | assert objs[1].metadata.name == 'bla-xyz' 157 | 158 | with data_dir.joinpath('example-def.tmpl').open() as f: 159 | objs = list(codecs.load_all_yaml(f, context={'test': 'xyz'})) 160 | kinds = [o.kind for o in objs] 161 | 162 | assert kinds == ['Secret', 'Mydb', 'Service', 'Deployment'] 163 | assert objs[1].metadata.name == 'bla-xyz' 164 | 165 | 166 | def test_load_all_yaml_template_env(): 167 | gr.create_namespaced_resource('myapp.com', 'v1', 'Mydb', 'mydbs') 168 | import jinja2 169 | env = jinja2.Environment() 170 | env.globals['test'] = 'global' 171 | 172 | objs = list(codecs.load_all_yaml( 173 | data_dir.joinpath('example-def.tmpl').read_text(), 174 | context={}, 175 | template_env=env) 176 | ) 177 | kinds = [o.kind for o in objs] 178 | 179 | assert kinds == ['Secret', 'Mydb', 'Service', 'Deployment'] 180 | assert objs[1].metadata.name == 'bla-global' 181 | 182 | with data_dir.joinpath('example-def.tmpl').open() as f: 183 | objs = list(codecs.load_all_yaml(f, context={}, template_env=env)) 184 | kinds = [o.kind for o in objs] 185 | 186 | assert kinds == ['Secret', 'Mydb', 'Service', 'Deployment'] 187 | assert objs[1].metadata.name == 'bla-global' 188 | 189 | # template_env is not an environment 190 | with pytest.raises(LoadResourceError, match='.*valid jinja2 template'): 191 | codecs.load_all_yaml( 192 | data_dir.joinpath('example-def.tmpl').read_text(), 193 | context={}, 194 | template_env={} 195 | ) 196 | 197 | 198 | def test_load_all_yaml_all_null(): 199 | yaml_file = "example-def-null.yaml" 200 | objs = 
list(codecs.load_all_yaml(data_dir.joinpath(yaml_file).read_text())) 201 | assert len(objs) == 0 202 | 203 | 204 | @mock.patch('lightkube.codecs.jinja2', new=None) 205 | def test_load_all_yaml_missing_dependency(): 206 | with pytest.raises(ImportError, match='.*requires jinja2.*'): 207 | codecs.load_all_yaml( 208 | data_dir.joinpath('example-def.tmpl').read_text(), 209 | context={'test': 'xyz'} 210 | ) 211 | 212 | 213 | @pytest.mark.parametrize( 214 | "create_resources_for_crds", 215 | [ 216 | True, # generic resources should be created 217 | False, # no generic resources should be created 218 | ] 219 | ) 220 | def test_load_all_yaml_creating_generic_resources(create_resources_for_crds): 221 | template_yaml = data_dir.joinpath('example-multi-version-crd.yaml').read_text() 222 | template_dict = list(yaml.safe_load_all(template_yaml))[0] 223 | 224 | expected_group = template_dict["spec"]["group"] 225 | expected_kind = template_dict["spec"]["names"]["kind"] 226 | 227 | # Confirm no generic resources exist before testing 228 | assert len(resource_registry._registry) == 0 229 | 230 | objs = list(codecs.load_all_yaml( 231 | template_yaml, 232 | create_resources_for_crds=create_resources_for_crds, 233 | )) 234 | 235 | # Confirm expected resources exist 236 | if create_resources_for_crds: 237 | for version in template_dict["spec"]["versions"]: 238 | resource = resource_registry.get(f"{expected_group}/{version['name']}", expected_kind) 239 | assert resource is not None 240 | 241 | # Confirm we did not make any extra resources 242 | # + 1 as CustomResourceDefinition is also added to the registry 243 | assert len(resource_registry._registry) == len(template_dict["spec"]["versions"]) + 1 244 | else: 245 | # Confirm we did not make any resources except CustomResourceDefinition 246 | assert len(resource_registry._registry) == 1 247 | 248 | assert len(objs) == 1 249 | 250 | 251 | def test_dump_all_yaml(): 252 | cm = ConfigMap( 253 | apiVersion='v1', kind='ConfigMap', 254 | 
metadata=ObjectMeta(name='xyz', labels={'x': 'y'}) 255 | ) 256 | Mydb = gr.create_namespaced_resource('myapp.com', 'v1', 'Mydb', 'mydbs') 257 | 258 | db = Mydb( 259 | apiVersion='myapp.com/v1', kind='Mydb', 260 | metadata=ObjectMeta(name='db1'), xyz={'a': 'b'} 261 | ) 262 | 263 | res = codecs.dump_all_yaml([cm, db]) 264 | expected = textwrap.dedent(""" 265 | apiVersion: v1 266 | kind: ConfigMap 267 | metadata: 268 | labels: 269 | x: y 270 | name: xyz 271 | --- 272 | apiVersion: myapp.com/v1 273 | kind: Mydb 274 | metadata: 275 | name: db1 276 | xyz: 277 | a: b 278 | """).lstrip() 279 | assert res == expected 280 | 281 | res = codecs.dump_all_yaml([db, cm], indent=4) 282 | expected = textwrap.dedent(""" 283 | apiVersion: myapp.com/v1 284 | kind: Mydb 285 | metadata: 286 | name: db1 287 | xyz: 288 | a: b 289 | --- 290 | apiVersion: v1 291 | kind: ConfigMap 292 | metadata: 293 | labels: 294 | x: y 295 | name: xyz 296 | """).lstrip() 297 | assert res == expected 298 | 299 | -------------------------------------------------------------------------------- /tests/test_config.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | from unittest.mock import patch 4 | 5 | import pytest 6 | 7 | from lightkube.config import kubeconfig 8 | from lightkube.core import exceptions 9 | 10 | 11 | def test_from_server(): 12 | cfg = kubeconfig.KubeConfig.from_server("http://testserver.com").get() 13 | assert cfg.context_name == "default" 14 | assert cfg.namespace == "default" 15 | assert cfg.cluster.server == "http://testserver.com" 16 | assert cfg.user is None 17 | 18 | cfg = kubeconfig.KubeConfig.from_server("http://testserver.com", namespace="ns").get() 19 | assert cfg.context_name == "default" 20 | assert cfg.namespace == "ns" 21 | 22 | 23 | @pytest.fixture() 24 | def cfg(): 25 | fname = Path("tests").joinpath("test_config.yaml") 26 | return kubeconfig.KubeConfig.from_file(fname) 27 | 28 | 29 | def 
test_from_file(cfg): 30 | c = cfg.get() 31 | assert c.context_name == 'ctx11' 32 | assert c.user.username == 'u1' 33 | assert c.user.password == 'p1' 34 | assert c.cluster.server == 'server1' 35 | assert c.context.user == 'user1' 36 | assert c.namespace == kubeconfig.DEFAULT_NAMESPACE 37 | 38 | c = cfg.get(context_name='ctx12') 39 | assert c.context_name == 'ctx12' 40 | assert c.user.token == 'ABC' 41 | assert c.cluster.server == 'server1' 42 | assert c.context.user == 'user2' 43 | assert c.namespace == kubeconfig.DEFAULT_NAMESPACE 44 | 45 | c = cfg.get(context_name='ctx21') 46 | assert c.context_name == 'ctx21' 47 | assert c.user.username == 'u1' 48 | assert c.cluster.server == 'server2' 49 | assert c.context.cluster == 'cl2' 50 | assert c.namespace == 'ns21' 51 | 52 | 53 | def test_from_file_miss_config(cfg): 54 | # non existing context raise an exception 55 | with pytest.raises(exceptions.ConfigError): 56 | assert cfg.get(context_name='ctx22') 57 | 58 | # if default context is missing, raise an exception 59 | cfg.current_context = None 60 | with pytest.raises(exceptions.ConfigError): 61 | assert cfg.get() 62 | 63 | # default context is missing, but a default is provided 64 | c = cfg.get(default=kubeconfig.PROXY_CONF) 65 | assert c is kubeconfig.PROXY_CONF 66 | 67 | 68 | def test_from_dict(): 69 | cfg = kubeconfig.KubeConfig.from_dict({ 70 | 'clusters': [{'name': 'cl1', 'cluster': {'server': 'a'}}], 71 | 'contexts': [{'name': 'a', 'context': {'cluster': 'cl1', 'namespace': 'ns'}}] 72 | }) 73 | assert cfg.current_context is None 74 | assert cfg.clusters['cl1'].server == 'a' 75 | assert cfg.contexts['a'].namespace == 'ns' 76 | 77 | c = cfg.get('a') 78 | assert c.namespace == 'ns' 79 | assert c.cluster.server == 'a' 80 | 81 | 82 | @pytest.fixture 83 | def service_account(tmpdir): 84 | tmpdir = Path(tmpdir) 85 | tmpdir.joinpath("namespace").write_text("my-namespace") 86 | tmpdir.joinpath("token").write_text("ABCD") 87 | 
tmpdir.joinpath("ca.crt").write_text("...bla...") 88 | 89 | os.environ["KUBERNETES_SERVICE_HOST"] = "k8s.local" 90 | os.environ["KUBERNETES_SERVICE_PORT"] = "9443" 91 | return tmpdir 92 | 93 | 94 | def test_from_service_account(service_account): 95 | cfg = kubeconfig.KubeConfig.from_service_account(service_account) 96 | c = cfg.get() 97 | assert c.namespace == "my-namespace" 98 | assert c.user.token == "ABCD" 99 | assert c.cluster.server == "https://k8s.local:9443" 100 | 101 | 102 | def test_from_service_account_not_found(tmpdir): 103 | with pytest.raises(exceptions.ConfigError): 104 | kubeconfig.KubeConfig.from_service_account(tmpdir) 105 | 106 | 107 | def test_from_file_not_found(tmpdir): 108 | with pytest.raises(exceptions.ConfigError): 109 | kubeconfig.KubeConfig.from_file(Path(tmpdir).joinpath("bla")) 110 | 111 | 112 | def test_from_env(service_account): 113 | cfg = kubeconfig.KubeConfig.from_env(service_account) 114 | assert cfg.get().user.token == "ABCD" 115 | 116 | with patch("lightkube.config.kubeconfig.os.environ") as environ: 117 | environ.get.return_value = str(Path("tests").joinpath("test_config.yaml")) 118 | cfg = kubeconfig.KubeConfig.from_env(service_account.joinpath("xyz")) 119 | assert cfg.get().context_name == 'ctx11' 120 | environ.get.assert_called_with("KUBECONFIG", kubeconfig.DEFAULT_KUBECONFIG) 121 | 122 | with patch("lightkube.config.kubeconfig.os.environ") as environ: 123 | environ.get.return_value = str(Path("tests").joinpath("test_config.yaml")) 124 | cfg = kubeconfig.KubeConfig.from_env(service_account.joinpath("xyz"), default_config='/tmp/bla') 125 | assert cfg.get().context_name == 'ctx11' 126 | environ.get.assert_called_with("KUBECONFIG", '/tmp/bla') 127 | -------------------------------------------------------------------------------- /tests/test_config.yaml: -------------------------------------------------------------------------------- 1 | clusters: 2 | - name: cl1 3 | cluster: {server: server1} 4 | - name: cl2 5 | cluster: 
{server: server2} 6 | users: 7 | - name: user1 8 | user: 9 | username: u1 10 | password: p1 11 | - name: user2 12 | user: 13 | token: ABC 14 | contexts: 15 | - name: ctx11 16 | context: 17 | cluster: cl1 18 | user: user1 19 | - name: ctx12 20 | context: 21 | cluster: cl1 22 | user: user2 23 | - name: ctx21 24 | context: 25 | cluster: cl2 26 | user: user1 27 | namespace: ns21 28 | current-context: ctx11 29 | 30 | -------------------------------------------------------------------------------- /tests/test_config_default_user.yaml: -------------------------------------------------------------------------------- 1 | clusters: 2 | - name: a_cluster 3 | cluster: {} 4 | contexts: 5 | - name: a_context 6 | context: 7 | cluster: a_cluster 8 | user: default 9 | -------------------------------------------------------------------------------- /tests/test_config_exec.yaml: -------------------------------------------------------------------------------- 1 | clusters: 2 | - name: a_cluster 3 | cluster: {} 4 | contexts: 5 | - name: ctx1 6 | context: 7 | cluster: a_cluster 8 | user: execuser 9 | - name: ctx2 10 | context: 11 | cluster: a_cluster 12 | user: execuser2 13 | current-context: ctx1 14 | users: 15 | - name: execuser 16 | user: 17 | exec: 18 | apiVersion: client.authentication.k8s.io/v1beta1 19 | args: 20 | - '{"apiVersion":"client.authentication.k8s.io/v1beta1","kind":"ExecCredential","status":{"token":"my-bearer-token"}}' 21 | command: echo 22 | - name: execuser2 23 | user: 24 | exec: 25 | apiVersion: client.authentication.k8s.io/v1beta1 26 | args: ["-c", "echo $MYENV"] 27 | command: bash 28 | env: 29 | - name: MYENV 30 | value: '{"apiVersion":"client.authentication.k8s.io/v1beta1","kind":"ExecCredential","status":{"token":"my-bearer-token"}}' 31 | -------------------------------------------------------------------------------- /tests/test_config_user_password.yaml: -------------------------------------------------------------------------------- 1 | clusters: 2 | - name: 
a_cluster 3 | cluster: {} 4 | contexts: 5 | - name: ctx 6 | context: 7 | cluster: a_cluster 8 | user: execuser 9 | current-context: ctx 10 | users: 11 | - name: execuser 12 | user: 13 | username: bla 14 | password: bla123 15 | 16 | -------------------------------------------------------------------------------- /tests/test_dataclasses_dict.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | from dataclasses import dataclass, field 3 | from datetime import datetime, timezone 4 | 5 | import pytest 6 | 7 | from lightkube.core.dataclasses_dict import DataclassDictMixIn 8 | 9 | 10 | @dataclass 11 | class B(DataclassDictMixIn): 12 | b1: str 13 | b2: 'A' = None 14 | b3: 'dict' = None 15 | 16 | 17 | @dataclass 18 | class C(DataclassDictMixIn): 19 | c1: str 20 | c2: List['A'] = None 21 | c3: str = field(metadata={"json": "$ref"}, default=None) 22 | 23 | 24 | @dataclass 25 | class A(DataclassDictMixIn): 26 | a1: str 27 | a2: int = 0 28 | a3: 'bool' = False 29 | 30 | 31 | @dataclass 32 | class DT(DataclassDictMixIn): 33 | dt: 'datetime' 34 | 35 | 36 | @dataclass 37 | class Def(DataclassDictMixIn): 38 | d1: str 39 | d2: int = 2 40 | d3: 'bool' = False 41 | d4: str = "ok" 42 | 43 | 44 | @pytest.mark.parametrize("lazy", [True, False]) 45 | def test_lazy_default_not_hiding_added(lazy): 46 | # Setup a C object without setting c2 (default) 47 | inst = C.from_dict({"c1": "val"}, lazy=lazy) 48 | assert inst.to_dict() == {"c1": "val"} 49 | inst.c2 = [A("def")] 50 | assert inst.to_dict() == {"c1": "val", "c2": [{"a1": "def"}]} 51 | 52 | 53 | @pytest.mark.parametrize("lazy", [True, False]) 54 | def test_lazy_loaded_not_hiding_set(lazy): 55 | # Setup a C object without setting c2 (default) 56 | inst = C.from_dict({"c1": "val", "c2": [{"a1": "abc"}]}, lazy=lazy) 57 | assert inst.to_dict() == {"c1": "val", "c2": [{"a1": "abc"}]} 58 | # Change c2 list attribute 59 | inst.c2 = [A("def")] 60 | assert inst.to_dict() == {"c1": 
"val", "c2": [{"a1": "def"}]} 61 | 62 | 63 | @pytest.mark.parametrize("lazy", [True, False]) 64 | def test_lazy_loaded_not_hiding_cleared(lazy): 65 | # Setup a C object without setting c2 (default) 66 | inst = C.from_dict({"c1": "val", "c2": [{"a1": "abc"}]}, lazy=lazy) 67 | assert inst.to_dict() == {"c1": "val", "c2": [{"a1": "abc"}]} 68 | # Change c2 list attribute 69 | inst.c2 = None 70 | assert inst.to_dict() == {"c1": "val"} 71 | 72 | 73 | @pytest.mark.parametrize("lazy", [True, False]) 74 | def test_single(lazy): 75 | a = A.from_dict({'a1': 'a', 'a3': True}, lazy=lazy) 76 | assert a.a1 == 'a' 77 | assert a.a2 == 0 78 | assert a.a3 is True 79 | assert a.to_dict() == {'a1': 'a', 'a3': True} 80 | 81 | 82 | @pytest.mark.parametrize("lazy", [True, False]) 83 | def test_nasted(lazy): 84 | b = B.from_dict({'b1': 'ok', 'b2': {'a1': 'a', 'a3': True}}, lazy=lazy) 85 | assert b.b1 == 'ok' 86 | if lazy: # when we use lazy, sub-objects are not expanded yet 87 | assert 'b2' not in vars(b) 88 | else: 89 | assert 'b2' in vars(b) 90 | assert b.b2.a3 is True 91 | assert b.to_dict() == {'b1': 'ok', 'b2': {'a1': 'a', 'a3': True}} 92 | 93 | 94 | @pytest.mark.parametrize("lazy", [True, False]) 95 | def test_nasted_in_list(lazy): 96 | c = C.from_dict({'c1': 'ok', 'c2': [{'a1': 'a', 'a3': True}, {'a1': 'b'}]}, lazy=lazy) 97 | if lazy: # when we use lazy, sub-objects are not expanded yet 98 | assert 'c2' not in vars(c) 99 | else: 100 | assert 'c2' in vars(c) 101 | assert c.c2[0].a3 is True 102 | assert c.c2[1].a1 == 'b' 103 | assert c.to_dict() == {'c1': 'ok', 'c2': [ 104 | {'a1': 'a', 'a3': True}, {'a1': 'b'} 105 | ]} 106 | 107 | 108 | def test_nasted_to_dict(): 109 | b = B.from_dict({'b1': 'ok', 'b2': {'a1': 'a', 'a3': True}}, lazy=True) 110 | assert b.to_dict() == {'b1': 'ok', 'b2': {'a1': 'a', 'a3': True}} 111 | 112 | 113 | @pytest.mark.parametrize("lazy", [True, False]) 114 | def test_dict(lazy): 115 | b = B.from_dict({'b1': 'ok', 'b3': {'xx': 'x'}}, lazy=lazy) 116 | assert 
b.to_dict() == {'b1': 'ok', 'b3': {'xx': 'x'}} 117 | 118 | 119 | def test_datatime(): 120 | """Datetime get converted to string and back""" 121 | d = DT.from_dict({'dt': '2019-08-03T11:32:48Z'}) 122 | assert d.dt == datetime(2019, 8, 3, 11, 32, 48, tzinfo=timezone.utc) 123 | assert d.to_dict() == {'dt': '2019-08-03T11:32:48Z'} 124 | 125 | d = DT.from_dict({'dt': '2019-08-03T11:32:48+02:30'}) 126 | assert isinstance(d.dt, datetime) and str(d.dt) == '2019-08-03 11:32:48+02:30' 127 | assert d.to_dict() == {'dt': '2019-08-03T11:32:48+02:30'} 128 | 129 | 130 | @pytest.mark.parametrize("lazy", [True, False]) 131 | def test_rename(lazy): 132 | """We can rename fields from/to dicts""" 133 | c = C.from_dict({'c1': 'a', '$ref': 'b'}, lazy=lazy) 134 | assert c.c1 == 'a' 135 | assert c.c3 == 'b' 136 | c.c3 = 'c' 137 | 138 | assert c.to_dict() == {'c1': 'a', '$ref': 'c'} 139 | 140 | 141 | @pytest.mark.parametrize("lazy", [True, False]) 142 | def test_drop_unknown(lazy): 143 | """Unknown attributes are dropped""" 144 | c = C.from_dict({'c1': 'a', 'k': 'b'}, lazy=lazy) 145 | assert c.c1 == 'a' 146 | assert not hasattr(c, 'k') 147 | 148 | assert c.to_dict() == {'c1': 'a'} 149 | 150 | 151 | def test_default_not_encoded(): 152 | """Test that default values are not returned in the dict""" 153 | assert Def(d1='a').to_dict() == {'d1': 'a'} 154 | assert Def(d1='a', d2=2).to_dict() == {'d1': 'a'} 155 | assert Def(d1='a', d2=0).to_dict() == {'d1': 'a', 'd2': 0} 156 | assert Def(d1='a', d3=False).to_dict() == {'d1': 'a'} 157 | assert Def(d1='a', d3=True).to_dict() == {'d1': 'a', 'd3': True} 158 | assert Def(d1='a', d4='ok').to_dict() == {'d1': 'a'} 159 | assert Def(d1='a', d4='ko').to_dict() == {'d1': 'a', 'd4': 'ko'} 160 | -------------------------------------------------------------------------------- /tests/test_generic_resource.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from unittest import mock 3 | 4 | from lightkube 
import generic_resource as gr 5 | from lightkube.core.generic_client import GenericClient 6 | from lightkube.models.meta_v1 import ObjectMeta 7 | from lightkube.resources.apiextensions_v1 import CustomResourceDefinition 8 | from lightkube.models.apiextensions_v1 import ( 9 | CustomResourceDefinitionNames, 10 | CustomResourceDefinitionSpec, 11 | CustomResourceDefinitionVersion, 12 | ) 13 | from lightkube.core.resource_registry import resource_registry 14 | 15 | 16 | def create_dummy_crd(group="thisgroup", kind="thiskind", plural="thiskinds", scope="Namespaced", 17 | versions=None): 18 | if versions is None: 19 | versions = ['v1alpha1', 'v1'] 20 | 21 | crd = CustomResourceDefinition( 22 | spec=CustomResourceDefinitionSpec( 23 | group=group, 24 | names=CustomResourceDefinitionNames( 25 | kind=kind, 26 | plural=plural, 27 | ), 28 | scope=scope, 29 | versions=[ 30 | CustomResourceDefinitionVersion( 31 | name=version, 32 | served=True, 33 | storage=True, 34 | ) for version in versions 35 | ], 36 | ) 37 | ) 38 | 39 | return crd 40 | 41 | 42 | @pytest.fixture(autouse=True) 43 | def cleanup_registry(): 44 | """Cleanup the registry before each test""" 45 | yield 46 | resource_registry.clear() 47 | 48 | 49 | class MockedClient(GenericClient): 50 | def __init__(self): 51 | self.namespace = 'default' 52 | self._field_manager = None 53 | self._dry_run = False 54 | 55 | 56 | @pytest.fixture() 57 | def mocked_client_list_crds(): 58 | """Yields a Client with a mocked .list which returns a fixed list of CRDs 59 | 60 | **returns** Tuple of: mocked `Client`, list of CRDs, integer number of resources defined by 61 | CRDs 62 | """ 63 | scopes = ["Namespaced", "Cluster"] 64 | version_names = ['v2', 'v3'] 65 | 66 | crds = [create_dummy_crd(scope=scope, kind=scope, versions=version_names) for scope in scopes] 67 | expected_n_resources = len(version_names) * len(crds) 68 | 69 | with mock.patch("lightkube.Client") as client_maker: 70 | mocked_client = mock.MagicMock() 71 | 
mocked_client.list.return_value = crds 72 | client_maker.return_value = mocked_client 73 | yield mocked_client, crds, expected_n_resources 74 | 75 | 76 | class AsyncIterator: 77 | """Provides a `async for` compatible iterator 78 | 79 | Pattern taken from https://stackoverflow.com/a/36724229/5394584 80 | """ 81 | def __init__(self, seq): 82 | self.iter = iter(seq) 83 | 84 | def __aiter__(self): 85 | return self 86 | 87 | async def __anext__(self): 88 | try: 89 | return next(self.iter) 90 | except StopIteration: 91 | raise StopAsyncIteration 92 | 93 | 94 | @pytest.mark.asyncio 95 | @pytest.fixture() 96 | def mocked_asyncclient_list_crds(): 97 | """Yields an AsyncClient with a mocked .list which returns a fixed list of CRDs 98 | 99 | **returns** Tuple of: mocked `AsyncClient`, list of CRDs, integer number of resources defined by 100 | CRDs 101 | """ 102 | scopes = ["Namespaced", "Cluster"] 103 | version_names = ['v2', 'v3'] 104 | 105 | crds = [create_dummy_crd(scope=scope, kind=scope, versions=version_names) for scope in scopes] 106 | asynccrds = AsyncIterator(crds) 107 | expected_n_resources = len(version_names) * len(crds) 108 | 109 | with mock.patch("lightkube.AsyncClient") as client_maker: 110 | # This can be removed when python < 3.8 is not supported 111 | try: 112 | mocked_client = mock.AsyncMock() 113 | except AttributeError: 114 | import asyncmock 115 | mocked_client = asyncmock.AsyncMock() 116 | 117 | # AsyncClient.list is not async, but AsyncMock will automatically generate it as async. 
118 | # Instead, mock it explicitly with a regular MagicMock 119 | mocked_list = mock.MagicMock() 120 | mocked_list.return_value = asynccrds 121 | mocked_client.list = mocked_list 122 | client_maker.return_value = mocked_client 123 | yield mocked_client, crds, expected_n_resources 124 | 125 | 126 | def test_create_namespaced_resource(): 127 | c = MockedClient() 128 | Test = gr.create_namespaced_resource('test.eu', 'v1', 'TestN', 'tests') 129 | assert Test.__name__ == 'TestN' 130 | 131 | pr = c.prepare_request('get', Test, name='xx', namespace='myns') 132 | assert pr.method == 'GET' 133 | assert pr.url == 'apis/test.eu/v1/namespaces/myns/tests/xx' 134 | 135 | pr = c.prepare_request('list', Test, namespace='myns') 136 | assert pr.method == 'GET' 137 | assert pr.url == 'apis/test.eu/v1/namespaces/myns/tests' 138 | 139 | pr = c.prepare_request('post', obj=Test(metadata={'namespace': 'myns'}, spec={'a': 1})) 140 | assert pr.method == 'POST' 141 | assert pr.url == 'apis/test.eu/v1/namespaces/myns/tests' 142 | assert pr.data == {'apiVersion': 'test.eu/v1', 'kind': 'TestN', 'spec': {'a': 1}, 'metadata': {'namespace': 'myns'}} 143 | 144 | pr = c.prepare_request('get', Test.Scale, name='xx', namespace='myns') 145 | assert pr.method == 'GET' 146 | assert pr.url == 'apis/test.eu/v1/namespaces/myns/tests/xx/scale' 147 | 148 | pr = c.prepare_request('get', Test.Status, name='xx', namespace='myns') 149 | assert pr.method == 'GET' 150 | assert pr.url == 'apis/test.eu/v1/namespaces/myns/tests/xx/status' 151 | 152 | 153 | def test_create_global_resource(): 154 | c = MockedClient() 155 | Test = gr.create_global_resource('test.eu', 'v1', 'TestG', 'tests') 156 | assert Test.__name__ == 'TestG' 157 | 158 | pr = c.prepare_request('get', Test, name='xx') 159 | assert pr.method == 'GET' 160 | assert pr.url == 'apis/test.eu/v1/tests/xx' 161 | 162 | pr = c.prepare_request('list', Test) 163 | assert pr.method == 'GET' 164 | assert pr.url == 'apis/test.eu/v1/tests' 165 | 166 | pr = 
c.prepare_request('post', obj=Test(spec={'a': 1})) 167 | assert pr.method == 'POST' 168 | assert pr.url == 'apis/test.eu/v1/tests' 169 | assert pr.data == {'apiVersion': 'test.eu/v1', 'kind': 'TestG', 'spec': {'a': 1}} 170 | 171 | pr = c.prepare_request('get', Test.Scale, name='xx') 172 | assert pr.method == 'GET' 173 | assert pr.url == 'apis/test.eu/v1/tests/xx/scale' 174 | 175 | pr = c.prepare_request('get', Test.Status, name='xx') 176 | assert pr.method == 'GET' 177 | assert pr.url == 'apis/test.eu/v1/tests/xx/status' 178 | 179 | 180 | @pytest.mark.parametrize( 181 | "crd_scope", 182 | [ 183 | "Namespaced", 184 | "Cluster", 185 | ] 186 | ) 187 | def test_create_resources_from_crd(crd_scope): 188 | version_names = ['v1alpha1', 'v1', 'v2'] 189 | crd = create_dummy_crd(scope=crd_scope, versions=version_names) 190 | 191 | # Confirm no generic resources exist before testing 192 | assert len(resource_registry._registry) == 0 193 | 194 | # Test the function 195 | gr.create_resources_from_crd(crd) 196 | 197 | # Confirm expected number of resources created 198 | assert len(resource_registry._registry) == len(version_names) 199 | 200 | # Confirm expected resources exist 201 | for version in version_names: 202 | resource = gr.get_generic_resource(f"{crd.spec.group}/{version}", crd.spec.names.kind) 203 | assert resource is not None 204 | 205 | 206 | def test_generic_model(): 207 | mod = gr.Generic.from_dict({'metadata': {'name': 'bla'}, 'test': {'ok': 4}}) 208 | assert mod.metadata.name == 'bla' 209 | assert mod.test['ok'] == 4 210 | assert mod.to_dict() == {'metadata': {'name': 'bla'}, 'test': {'ok': 4}} 211 | assert mod.status is None 212 | 213 | mod = gr.Generic.from_dict({'apiVersion': 'v1', 'kind': 'Test', 'status': 1}) 214 | assert mod.apiVersion == 'v1' 215 | assert mod.kind == 'Test' 216 | assert mod.metadata is None 217 | assert mod.to_dict() == {'apiVersion': 'v1', 'kind': 'Test', 'status': 1} 218 | assert mod.status == 1 219 | 220 | mod = 
gr.Generic(metadata=ObjectMeta(name='bla'), test={'ok': 4}) 221 | assert mod.metadata.name == 'bla' 222 | assert mod.test['ok'] == 4 223 | assert mod.to_dict() == {'metadata': {'name': 'bla'}, 'test': {'ok': 4}} 224 | assert mod.status is None 225 | 226 | with pytest.raises(AttributeError): 227 | mod._a 228 | 229 | 230 | def test_load_in_cluster_generic_resources(mocked_client_list_crds): 231 | """Test that load_in_cluster_generic_resources creates generic resources for crds in cluster""" 232 | # Set up environment 233 | mocked_client, expected_crds, expected_n_resources = mocked_client_list_crds 234 | 235 | # Confirm no generic resources exist before testing 236 | assert len(resource_registry._registry) == 0 237 | 238 | # Test the function 239 | gr.load_in_cluster_generic_resources(mocked_client) 240 | 241 | # Confirm the expected resources and no others were created 242 | assert len(resource_registry._registry) == expected_n_resources 243 | for crd in expected_crds: 244 | for version in crd.spec.versions: 245 | resource = gr.get_generic_resource(f"{crd.spec.group}/{version.name}", crd.spec.names.kind) 246 | assert resource is not None 247 | 248 | mocked_client.list.assert_called_once() 249 | 250 | 251 | @pytest.mark.asyncio 252 | async def test_async_load_in_cluster_generic_resources(mocked_asyncclient_list_crds): 253 | """Test that async_load_in_cluster_generic_resources creates generic resources for crds in cluster""" 254 | # Set up environment 255 | mocked_client, expected_crds, expected_n_resources = mocked_asyncclient_list_crds 256 | 257 | # Confirm no generic resources exist before testing 258 | assert len(resource_registry._registry) == 0 259 | 260 | # Test the function 261 | await gr.async_load_in_cluster_generic_resources(mocked_client) 262 | 263 | # Confirm the expected resources and no others were created 264 | assert len(resource_registry._registry) == expected_n_resources 265 | for crd in expected_crds: 266 | for version in crd.spec.versions: 267 | 
resource = gr.get_generic_resource(f"{crd.spec.group}/{version.name}", crd.spec.names.kind) 268 | assert resource is not None 269 | 270 | # This only works for python >3.8, not for the asyncmock package needed in <3.8 271 | # mocked_client.list.assert_called_once() 272 | 273 | 274 | def test_scale_model(): 275 | """Test we are using the right model here""" 276 | Test = gr.create_global_resource('test.eu', 'v1', 'TestS', 'tests') 277 | a = Test.Scale.from_dict({'spec': {'replicas': 2}}) 278 | assert a.spec.replicas == 2 279 | 280 | 281 | def test_signature_change_not_allowed(): 282 | gr.create_namespaced_resource('test.eu', 'v1', 'TestN', 'tests') 283 | gr.create_namespaced_resource('test.eu', 'v1', 'TestN', 'tests') 284 | 285 | with pytest.raises(ValueError, match='.*different signature'): 286 | gr.create_namespaced_resource('test.eu', 'v1', 'TestN', 'tests', verbs=['get']) 287 | 288 | with pytest.raises(ValueError, match='.*different signature'): 289 | gr.create_global_resource('test.eu', 'v1', 'TestN', 'tests') 290 | -------------------------------------------------------------------------------- /tests/test_operators.py: -------------------------------------------------------------------------------- 1 | from lightkube import operators 2 | 3 | 4 | def test_exists(): 5 | assert operators.exists().encode('key') == 'key' 6 | 7 | 8 | def test_not_exists(): 9 | assert operators.not_exists().encode('key') == '!key' 10 | 11 | 12 | def test_equal(): 13 | assert operators.equal('xxx').encode('key') == 'key=xxx' 14 | 15 | 16 | def test_not_equal(): 17 | assert operators.not_equal('xxx').encode('key') == 'key!=xxx' 18 | 19 | 20 | def test_in(): 21 | assert operators.in_(['xxx', 'yyy']).encode('key') == 'key in (xxx,yyy)' 22 | 23 | 24 | def test_not_in(): 25 | assert operators.not_in(['xxx', 'zzz']).encode('key') == 'key notin (xxx,zzz)' 26 | -------------------------------------------------------------------------------- /tests/test_quantity.py: 
-------------------------------------------------------------------------------- 1 | import decimal 2 | import pytest 3 | 4 | from lightkube.models.core_v1 import ResourceRequirements 5 | from lightkube.utils.quantity import parse_quantity 6 | from lightkube.utils.quantity import equals_canonically 7 | 8 | 9 | def test_unitless(): 10 | """Unitless values must be interpreted as decimal notation.""" 11 | assert parse_quantity("1.5") == decimal.Decimal("1.5") 12 | assert parse_quantity("-1.5") == decimal.Decimal("-1.5") 13 | assert parse_quantity("0.30000000000000004") == decimal.Decimal("0.301") 14 | assert parse_quantity("0.09999999999999998") == decimal.Decimal("0.1") 15 | assert parse_quantity("3.141592653") == decimal.Decimal("3.142") 16 | 17 | 18 | def test_binary_notation(): 19 | assert parse_quantity("1.5Gi") == parse_quantity("1536Mi") == decimal.Decimal("1610612736") 20 | assert parse_quantity("0.9Gi") == decimal.Decimal("966367641.6") 21 | 22 | 23 | def test_decimal_notation(): 24 | assert parse_quantity("1.5G") == decimal.Decimal("1500000000") 25 | assert parse_quantity("0.9G") == decimal.Decimal("900000000") 26 | assert parse_quantity("500m") == decimal.Decimal("0.5") 27 | 28 | 29 | def test_none(): 30 | assert parse_quantity(None) is None 31 | 32 | 33 | def test_invalid_value(): 34 | with pytest.raises(ValueError): 35 | parse_quantity("1.2.3") 36 | with pytest.raises(ValueError): 37 | parse_quantity("1e2.3") 38 | with pytest.raises(ValueError): 39 | # decimal.InvalidOperation 40 | parse_quantity("9e999") 41 | with pytest.raises(ValueError): 42 | # decimal.Overflow 43 | parse_quantity("9e9999999") 44 | 45 | 46 | def test_invalid_unit(): 47 | with pytest.raises(ValueError): 48 | parse_quantity("1kb") 49 | with pytest.raises(ValueError): 50 | parse_quantity("1GGi") 51 | 52 | 53 | def test_whitespace(): 54 | with pytest.raises(ValueError): 55 | parse_quantity("") 56 | with pytest.raises(ValueError): 57 | parse_quantity(" ") 58 | with 
pytest.raises(ValueError): 59 | parse_quantity("1 ") 60 | with pytest.raises(ValueError): 61 | parse_quantity(" 1") 62 | with pytest.raises(ValueError): 63 | parse_quantity("1 Gi") 64 | 65 | 66 | def test_canonical_equality_for_dicts_with_blanks(): 67 | first = {} 68 | second = {} 69 | assert equals_canonically(first, second) 70 | 71 | first = {} 72 | second = None 73 | assert equals_canonically(first, second) 74 | 75 | 76 | def test_canonical_equality_for_dicts_with_cpu(): 77 | first = {"cpu": "0.5"} 78 | second = {"cpu": "500m"} 79 | assert equals_canonically(first, second) 80 | 81 | 82 | def test_canonical_equality_for_dicts_with_memory(): 83 | first = {"memory": "1G"} 84 | second = {"memory": "1Gi"} 85 | assert not equals_canonically(first, second) 86 | 87 | 88 | def test_canonical_equality_for_dicts_with_both(): 89 | first = {"cpu": "0.6", "memory": "1.5Gi"} 90 | second = {"cpu": "600m", "memory": "1536Mi"} 91 | assert equals_canonically(first, second) 92 | 93 | 94 | def test_canonical_equality_for_extended_resources(): 95 | first = {"cpu": "0.6", "example.com/foo": "1"} 96 | second = {"cpu": "600m", "example.com/foo": "1"} 97 | assert equals_canonically(first, second) 98 | 99 | first = {"cpu": "0.6", "example.com/foo": "1"} 100 | second = {"cpu": "600m", "example.com/foo": "2"} 101 | assert not equals_canonically(first, second) 102 | 103 | first = {"cpu": "0.6", "example.com/foo": "1"} 104 | second = {"cpu": "600m"} 105 | assert not equals_canonically(first, second) 106 | 107 | 108 | def test_canonical_equality_for_resource_requirements_with_blanks(): 109 | first = ResourceRequirements() 110 | second = ResourceRequirements() 111 | assert equals_canonically(first, second) 112 | 113 | first = ResourceRequirements(limits={}) 114 | second = ResourceRequirements(limits={}) 115 | assert equals_canonically(first, second) 116 | 117 | first = ResourceRequirements(limits={}) 118 | second = ResourceRequirements(requests={}) 119 | assert equals_canonically(first, second) 
120 | 121 | 122 | def test_canonical_equality_for_resource_requirements_with_cpu(): 123 | first = ResourceRequirements(limits={"cpu": "0.5"}) 124 | second = ResourceRequirements(limits={"cpu": "500m"}) 125 | assert equals_canonically(first, second) 126 | 127 | first = ResourceRequirements(requests={"cpu": "0.5"}) 128 | second = ResourceRequirements(requests={"cpu": "500m"}) 129 | assert equals_canonically(first, second) 130 | 131 | first = ResourceRequirements(limits={"cpu": "0.5"}) 132 | second = ResourceRequirements(requests={"cpu": "500m"}) 133 | assert not equals_canonically(first, second) 134 | 135 | first = ResourceRequirements(limits={"cpu": "0.6"}, requests={"cpu": "0.5"}) 136 | second = ResourceRequirements(limits={"cpu": "600m"}, requests={"cpu": "500m"}) 137 | assert equals_canonically(first, second) 138 | 139 | 140 | def test_canonical_equality_for_resource_requirements_with_memory(): 141 | first = ResourceRequirements(limits={"memory": "1G"}) 142 | second = ResourceRequirements(limits={"memory": "1Gi"}) 143 | assert not equals_canonically(first, second) 144 | 145 | 146 | def test_canonical_equality_for_resource_requirements_with_both(): 147 | first = ResourceRequirements(limits={"cpu": "0.6", "memory": "1.5Gi"}, requests={"cpu": "0.5"}) 148 | second = ResourceRequirements(limits={"cpu": "600m", "memory": "1536Mi"}, requests={"cpu": "500m"}) 149 | assert equals_canonically(first, second) 150 | 151 | 152 | def test_invalid_canonical_equality(): 153 | with pytest.raises(TypeError): 154 | equals_canonically({}, ResourceRequirements()) 155 | with pytest.raises(TypeError): 156 | equals_canonically(None, ResourceRequirements()) 157 | -------------------------------------------------------------------------------- /tests/test_resource_registry.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from lightkube.codecs import resource_registry 4 | from lightkube.resources.core_v1 import Pod 5 | from 
lightkube.resources.apps_v1 import Deployment 6 | from lightkube.resources.events_v1 import Event 7 | from lightkube.core import resource as res 8 | 9 | @pytest.fixture(autouse=True) 10 | def cleanup_registry(): 11 | """Cleanup the registry after each test""" 12 | yield 13 | resource_registry.clear() 14 | 15 | @pytest.mark.parametrize( 16 | "version,kind,Res", 17 | [("v1", "Pod", Pod), ("apps/v1", "Deployment", Deployment), ("events.k8s.io/v1", "Event", Event)] 18 | ) 19 | def test_register(version, kind, Res): 20 | assert resource_registry.get(version, kind) is None 21 | res = resource_registry.register(Res) 22 | assert res is Res 23 | assert resource_registry.get(version, kind) is Res 24 | 25 | 26 | def test_register_decorator(): 27 | assert resource_registry.get("test.io/v1", "Search") is None 28 | 29 | @resource_registry.register 30 | class Search(res.NamespacedResource): 31 | _api_info = res.ApiInfo( 32 | resource=res.ResourceDef('test.io', 'v1', 'Search'), 33 | plural='searches', 34 | verbs=['delete', 'deletecollection', 'get', 'global_list', 'global_watch', 'list', 'patch', 'post', 'put', 35 | 'watch'] 36 | ) 37 | 38 | assert resource_registry.get("test.io/v1", "Search") is Search 39 | 40 | 41 | @pytest.mark.parametrize( 42 | "version,kind,Res", 43 | [("v1", "Pod", Pod), ("apps/v1", "Deployment", Deployment), ("events.k8s.io/v1", "Event", Event)] 44 | ) 45 | def test_load(version, kind, Res): 46 | assert resource_registry.get(version, kind) is None 47 | pod_class = resource_registry.load(version, kind) 48 | assert pod_class is Res 49 | assert resource_registry.get(version, kind) is Res 50 | -------------------------------------------------------------------------------- /tests/test_selector.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from lightkube.core.selector import build_selector 4 | from lightkube import operators 5 | 6 | 7 | def test_simple_types(): 8 | r = build_selector({ 9 | 'k1': 
'v1', 10 | 'k2': None, 11 | 'k3': ['b', 'c'], 12 | 'k4': {'a', 'b'}, 13 | 'k5': ('d', 'b') 14 | }) 15 | 16 | assert r == "k1=v1,k2,k3 in (b,c),k4 in (a,b),k5 in (b,d)" 17 | 18 | 19 | def test_operators(): 20 | r = build_selector({ 21 | 'k1': operators.equal('v1'), 22 | 'k2': operators.not_exists(), 23 | 'k3': operators.in_(['b', 'c']), 24 | 'k4': operators.not_in(['b', 'c']), 25 | 'k5': operators.not_equal('v5'), 26 | 'k6': operators.exists() 27 | }) 28 | 29 | assert r == "k1=v1,!k2,k3 in (b,c),k4 notin (b,c),k5!=v5,k6" 30 | 31 | 32 | def test_binary_only_selector(): 33 | with pytest.raises(ValueError): 34 | build_selector({'k2': None}, for_fields=True) 35 | 36 | with pytest.raises(ValueError): 37 | build_selector({'k2': operators.in_(['b', 'c'])}, for_fields=True) 38 | 39 | r = build_selector({'k1': 'a', 'k2': operators.not_equal('a')}, for_fields=True) 40 | assert r == "k1=a,k2!=a" 41 | 42 | r = build_selector({'k1': 'a', 'k2': operators.not_in(['a', 'b'])}, for_fields=True) 43 | assert r == "k1=a,k2!=a,k2!=b" 44 | -------------------------------------------------------------------------------- /tests/test_sort_objects.py: -------------------------------------------------------------------------------- 1 | from collections import namedtuple 2 | import pytest 3 | 4 | from lightkube import sort_objects 5 | 6 | 7 | @pytest.fixture() 8 | def resources_in_apply_order(): 9 | mock_resource = namedtuple("resource", ("kind",)) 10 | resources = [ 11 | mock_resource(kind="CustomResourceDefinition"), 12 | mock_resource(kind="Namespace"), 13 | mock_resource(kind="Secret"), 14 | mock_resource(kind="ServiceAccount"), 15 | mock_resource(kind="PersistentVolume"), 16 | mock_resource(kind="PersistentVolumeClaim"), 17 | mock_resource(kind="ConfigMap"), 18 | mock_resource(kind="Role"), 19 | mock_resource(kind="ClusterRole"), 20 | mock_resource(kind="RoleBinding"), 21 | mock_resource(kind="ClusterRoleBinding"), 22 | mock_resource(kind="something-else"), 23 | ] 24 | return resources 25 
| 26 | 27 | @pytest.mark.parametrize( 28 | "reverse", 29 | [ 30 | False, # Desired result in apply-friendly order 31 | True, # Desired order in delete-friendly order 32 | ] 33 | ) 34 | def test_sort_objects_by_kind(reverse, resources_in_apply_order): 35 | """Tests that sort_objects can kind-sort objects in both apply and delete orders.""" 36 | resources_expected_order = resources_in_apply_order 37 | if reverse: 38 | resources_expected_order = list(reversed(resources_expected_order)) 39 | 40 | # Add disorder to the test input 41 | resources_unordered = resources_expected_order[1:] + [resources_expected_order[0]] 42 | 43 | result = sort_objects(resources_unordered, reverse=reverse) 44 | assert result == resources_expected_order 45 | --------------------------------------------------------------------------------