├── kopf ├── _cogs │ ├── __init__.py │ ├── configs │ │ └── __init__.py │ ├── aiokits │ │ ├── __init__.py │ │ ├── aiobindings.py │ │ ├── aiotime.py │ │ ├── aiovalues.py │ │ └── aioadapters.py │ ├── helpers │ │ ├── .gitignore │ │ ├── __init__.py │ │ ├── typedefs.py │ │ ├── loaders.py │ │ ├── hostnames.py │ │ └── thirdparty.py │ ├── structs │ │ ├── ids.py │ │ ├── __init__.py │ │ └── finalizers.py │ └── clients │ │ ├── __init__.py │ │ ├── creating.py │ │ └── fetching.py ├── _core │ ├── __init__.py │ ├── actions │ │ ├── __init__.py │ │ └── lifecycles.py │ ├── intents │ │ ├── __init__.py │ │ ├── filters.py │ │ └── stoppers.py │ ├── reactor │ │ └── __init__.py │ └── engines │ │ └── __init__.py ├── py.typed ├── testing.py ├── __main__.py └── _kits │ ├── __init__.py │ └── loops.py ├── .codecov.yml ├── .github ├── FUNDING.yml ├── CODEOWNERS ├── ISSUE_TEMPLATE │ ├── config.yaml │ ├── question.yaml │ ├── feature-request.yaml │ └── bug-report.yaml ├── ISSUE_TEMPLATE.md ├── PULL_REQUEST_TEMPLATE.md └── workflows │ ├── publish.yaml │ └── codeql.yml ├── MAINTAINERS ├── docs ├── .gitattributes ├── architecture-layers.png ├── walkthrough │ ├── cleanup.rst │ └── prerequisites.rst ├── deployment-depl.yaml ├── naming.rst ├── loading.rst ├── troubleshooting.rst ├── minikube.rst ├── install.rst ├── startup.rst ├── shutdown.rst ├── vision.rst ├── tips-and-tricks.rst ├── async.rst ├── testing.rst ├── index.rst ├── idempotence.rst └── results.rst ├── examples ├── requirements.txt ├── 06-peering │ ├── example.py │ └── README.md ├── 09-testing │ ├── example.py │ ├── README.md │ └── test_example_09.py ├── 99-all-at-once │ └── README.md ├── 01-minimal │ ├── example.py │ └── README.md ├── .isort.cfg ├── README.md ├── 07-subhandlers │ └── example.py ├── obj.yaml ├── 12-embedded │ ├── test_nothing.py │ └── README.md ├── 15-timers │ └── example.py ├── 08-events │ ├── example.py │ └── README.md ├── 04-events │ ├── example.py │ └── README.md ├── 05-handlers │ ├── example.py │ └── README.md ├── 
10-builtins │ ├── README.md │ ├── example.py │ └── test_example_10.py ├── 03-exceptions │ ├── README.md │ └── example.py ├── 02-children │ ├── example.py │ └── README.md ├── crd.yaml ├── 16-indexing │ └── example.py ├── 11-filtering-handlers │ └── README.md └── 14-daemons │ └── example.py ├── tests ├── test_it.py ├── peering │ ├── conftest.py │ ├── test_resource_guessing.py │ └── test_keepalive.py ├── lifecycles │ ├── conftest.py │ ├── test_global_defaults.py │ └── test_real_invocation.py ├── k8s │ ├── conftest.py │ ├── test_watching_bookmarks.py │ ├── test_list_objs.py │ └── test_watching_with_freezes.py ├── registries │ ├── test_default_registry.py │ ├── test_creation.py │ ├── test_subhandlers_ids.py │ ├── test_matching_of_resources.py │ ├── test_id_detection.py │ ├── test_resumes_mixed_in.py │ └── test_matching_of_callbacks.py ├── test_absent_modules.py ├── utilities │ └── aiotasks │ │ ├── test_task_waiting.py │ │ └── test_task_selection.py ├── apis │ ├── test_default_namespace.py │ └── test_iterjsonlines.py ├── posting │ └── conftest.py ├── logging │ ├── conftest.py │ └── test_loggers.py ├── handling │ ├── indexing │ │ └── conftest.py │ ├── daemons │ │ ├── test_daemon_rematching.py │ │ ├── test_daemon_spawning.py │ │ ├── test_timer_triggering.py │ │ ├── test_timer_intervals.py │ │ └── test_timer_filtration.py │ └── test_parametrization.py ├── test_async.py ├── test_thirdparty.py ├── cli │ ├── test_help.py │ ├── conftest.py │ └── test_preloading.py ├── references │ ├── test_namespace_selection.py │ └── test_selector_properties.py ├── reactor │ ├── test_uids.py │ └── conftest.py ├── dicts │ ├── test_parsing.py │ ├── test_ensuring.py │ ├── test_removing.py │ └── test_walking.py ├── hierarchies │ ├── conftest.py │ └── test_type_validation.py ├── primitives │ ├── test_conditions.py │ └── test_containers.py ├── test_versions.py ├── admission │ ├── test_serving_ephemeral_memos.py │ ├── test_webhook_ngrok.py │ ├── test_certificates.py │ └── test_jsonpatch.py ├── 
authentication │ ├── test_tempfiles.py │ ├── test_reauthentication.py │ └── test_connectioninfo.py ├── persistence │ └── test_outcomes.py ├── e2e │ └── conftest.py ├── basic-structs │ ├── test_memories.py │ └── test_memos.py ├── settings │ ├── test_executor.py │ └── test_defaults.py ├── diffs │ └── test_protocols.py ├── timing │ └── test_sleeping.py ├── observation │ └── test_processing_of_namespaces.py └── test_filtering_helpers.py ├── SECURITY.md ├── .readthedocs.yaml ├── .gitignore ├── tools ├── install-kubectl.sh ├── install-kind.sh └── install-minikube.sh ├── CONTRIBUTORS.md ├── LICENSE ├── peering.yaml └── CONTRIBUTING.md /kopf/_cogs/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /kopf/_core/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.codecov.yml: -------------------------------------------------------------------------------- 1 | comment: off 2 | -------------------------------------------------------------------------------- /kopf/_cogs/configs/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /kopf/_core/actions/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /kopf/_core/intents/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | github: nolar 2 | 
-------------------------------------------------------------------------------- /kopf/py.typed: -------------------------------------------------------------------------------- 1 | # Marker file for PEP 561 2 | -------------------------------------------------------------------------------- /MAINTAINERS: -------------------------------------------------------------------------------- 1 | Sergey Vasilyev 2 | -------------------------------------------------------------------------------- /docs/.gitattributes: -------------------------------------------------------------------------------- 1 | *.png binary 2 | *.xml binary 3 | -------------------------------------------------------------------------------- /examples/requirements.txt: -------------------------------------------------------------------------------- 1 | kopf 2 | pykube-ng 3 | pyyaml 4 | -------------------------------------------------------------------------------- /tests/test_it.py: -------------------------------------------------------------------------------- 1 | def test_importing_works(): 2 | import kopf 3 | assert kopf 4 | -------------------------------------------------------------------------------- /docs/architecture-layers.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nolar/kopf/HEAD/docs/architecture-layers.png -------------------------------------------------------------------------------- /kopf/_cogs/aiokits/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | General-purpose tools & kits for asyncio primitives. 3 | """ 4 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # Who is automatically assigned to the new PRs based on their content. 
2 | * @nolar 3 | -------------------------------------------------------------------------------- /examples/06-peering/example.py: -------------------------------------------------------------------------------- 1 | import kopf 2 | 3 | 4 | @kopf.on.create('kopfexamples') 5 | def create_fn(**kwargs): 6 | pass 7 | -------------------------------------------------------------------------------- /kopf/_cogs/helpers/.gitignore: -------------------------------------------------------------------------------- 1 | # Auto-generated by setuptools-scm on builds & pip install. Configured in pyproject.toml. 2 | versions.py 3 | -------------------------------------------------------------------------------- /tests/peering/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | 4 | @pytest.fixture(autouse=True) 5 | def _autouse_fake_vault(fake_vault): 6 | pass 7 | -------------------------------------------------------------------------------- /examples/09-testing/example.py: -------------------------------------------------------------------------------- 1 | import kopf 2 | 3 | 4 | @kopf.on.create('kopfexamples') 5 | def create_fn(logger, **kwargs): 6 | logger.info("Something was logged here.") 7 | -------------------------------------------------------------------------------- /examples/99-all-at-once/README.md: -------------------------------------------------------------------------------- 1 | # Kopf example with all the features at once 2 | 3 | This operator contains all the features of the framework at once. 4 | It is used mostly for the development and debugging. 
5 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yaml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: true 2 | contact_links: 3 | - name: Community Support 4 | url: https://github.com/nolar/kopf/discussions/categories/q-a 5 | about: Please ask and answer questions here. 6 | -------------------------------------------------------------------------------- /examples/01-minimal/example.py: -------------------------------------------------------------------------------- 1 | import kopf 2 | 3 | 4 | @kopf.on.create('kopfexamples') 5 | def create_fn(spec, **kwargs): 6 | print(f"And here we are! Creating: {spec}") 7 | return {'message': 'hello world'} # will be the new status 8 | -------------------------------------------------------------------------------- /tests/lifecycles/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | import kopf 4 | 5 | 6 | @pytest.fixture(autouse=True) 7 | def clear_default_lifecycle(): 8 | try: 9 | yield 10 | finally: 11 | kopf.set_default_lifecycle(None) 12 | -------------------------------------------------------------------------------- /kopf/testing.py: -------------------------------------------------------------------------------- 1 | """ 2 | Helper tools to test the Kopf-based operators. 3 | 4 | This module is a part of the framework's public interface. 
5 | """ 6 | from kopf._kits.runner import KopfRunner 7 | 8 | __all__ = [ 9 | 'KopfRunner', 10 | ] 11 | -------------------------------------------------------------------------------- /examples/.isort.cfg: -------------------------------------------------------------------------------- 1 | [settings] 2 | line_length = 100 3 | multi_line_output = 11 4 | balanced_wrapping = true 5 | combine_as_imports = true 6 | case_sensitive = true 7 | 8 | known_third_party = kopf 9 | 10 | filter_files = true 11 | skip_glob = kopf/** 12 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | We acknowledge that every line of code that we write may potentially contain security issues. 2 | We are trying to deal with it responsibly and provide patches as quickly as possible. 3 | In case you detect any security issues, contact nolar@nolar.info. 4 | -------------------------------------------------------------------------------- /kopf/__main__.py: -------------------------------------------------------------------------------- 1 | """ 2 | CLI entry point, when used as a module: `python -m kopf`. 3 | 4 | Useful for debugging in the IDEs (use the start-mode "Module", module "kopf"). 
5 | """ 6 | from kopf import cli 7 | 8 | if __name__ == '__main__': 9 | cli.main() 10 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## Long story short 2 | 3 | 4 | 5 | 6 | ## Description 7 | 8 | 10 | -------------------------------------------------------------------------------- /tests/k8s/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | 4 | @pytest.fixture(autouse=True) 5 | def _autouse_resp_mocker(resp_mocker, version_api): 6 | pass 7 | 8 | 9 | @pytest.fixture(autouse=True) 10 | def _prevent_retries_in_api_tests(settings): 11 | settings.networking.error_backoffs = [] 12 | -------------------------------------------------------------------------------- /docs/walkthrough/cleanup.rst: -------------------------------------------------------------------------------- 1 | ======= 2 | Cleanup 3 | ======= 4 | 5 | To clean up the cluster after all the experiments are finished: 6 | 7 | .. code-block:: bash 8 | 9 | kubectl delete -f obj.yaml 10 | kubectl delete -f crd.yaml 11 | 12 | Alternatively, Minikube can be reset for the full cluster cleanup. 13 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | # Kopf examples 2 | 3 | For the examples to work, a sample CRD (Custom Resource Definition) should be created: 4 | 5 | ```bash 6 | kubectl apply -f crd.yaml 7 | ``` 8 | 9 | Also, some libraries are needed for some operators and handlers: 10 | 11 | ```bash 12 | pip install --group test -e . 
13 | ``` 14 | -------------------------------------------------------------------------------- /examples/07-subhandlers/example.py: -------------------------------------------------------------------------------- 1 | import kopf 2 | 3 | 4 | @kopf.on.create('kopfexamples') 5 | def create_fn(spec, **kwargs): 6 | 7 | for item in spec.get('items', []): 8 | 9 | @kopf.subhandler(id=item) 10 | async def create_item_fn(item=item, **kwargs): 11 | print(f"=== Handling creation for {item}. ===") 12 | -------------------------------------------------------------------------------- /examples/obj.yaml: -------------------------------------------------------------------------------- 1 | # A demo custom resource for the Kopf example operators. 2 | apiVersion: kopf.dev/v1 3 | kind: KopfExample 4 | metadata: 5 | name: kopf-example-1 6 | labels: 7 | somelabel: somevalue 8 | annotations: 9 | someannotation: somevalue 10 | spec: 11 | duration: 1m 12 | field: value 13 | items: 14 | - item1 15 | - item2 16 | -------------------------------------------------------------------------------- /kopf/_cogs/structs/ids.py: -------------------------------------------------------------------------------- 1 | from typing import NewType 2 | 3 | # Strings are taken from the users, but then tainted as this type for stricter type-checking: 4 | # to prevent usage of some other strings (e.g. operator id) as the handlers ids. 5 | # It is so much ubiquitous that it deserves its own module to avoid circular dependencies. 6 | HandlerId = NewType('HandlerId', str) 7 | -------------------------------------------------------------------------------- /examples/12-embedded/test_nothing.py: -------------------------------------------------------------------------------- 1 | """ 2 | Embeddable operators require very customised application-specific testing. 3 | Kopf cannot help here beyond its regular `kopf.testing.KopfRunner` helper, 4 | which is an equivalent of `kopf run` command. 
5 | 6 | This file exists to disable the implicit e2e tests 7 | (they skip if explicit e2e tests exist in the example directory). 8 | """ 9 | -------------------------------------------------------------------------------- /examples/09-testing/README.md: -------------------------------------------------------------------------------- 1 | # Kopf example for testing the operator 2 | 3 | Kopf provides some basic tools to test the Kopf-based operators. 4 | With these tools, the testing frameworks (pytest in this case) 5 | can run the operator-under-test in the background, while the test 6 | performs the resource manipulation. 7 | 8 | To run the tests: 9 | 10 | ```bash 11 | pytest 12 | ``` 13 | -------------------------------------------------------------------------------- /examples/15-timers/example.py: -------------------------------------------------------------------------------- 1 | import kopf 2 | 3 | 4 | @kopf.timer('kopfexamples', idle=5, interval=2) 5 | def every_few_seconds_sync(spec, logger, **_): 6 | logger.info(f"Ping from a sync timer: field={spec['field']!r}") 7 | 8 | 9 | @kopf.timer('kopfexamples', idle=10, interval=4) 10 | async def every_few_seconds_async(spec, logger, **_): 11 | logger.info(f"Ping from an async timer: field={spec['field']!r}") 12 | -------------------------------------------------------------------------------- /kopf/_core/reactor/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | The reactor groups all modules to watch & process the low- & high-level events. 3 | 4 | The low-level events are the kubernetes watch streams, received on every 5 | object change, including the metadata, status, etc. 6 | 7 | The high-level events are the actually identified changes in the objects, 8 | such as their creation, deletion, update both in general and per-field. 
9 | """ 10 | -------------------------------------------------------------------------------- /kopf/_core/engines/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Engines are things that run around the reactor (see `kopf._core.reactor`) 3 | to help it to function at full strength, but are not part of it. 4 | For example, all never-ending side-tasks for peering and k8s-event-posting. 5 | 6 | The reactor and engines exchange the state with each other (bi-directionally) 7 | via the provided synchronization objects, usually asyncio events & queues. 8 | """ 9 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # Read the Docs configuration file 2 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 3 | version: 2 4 | formats: all 5 | build: 6 | os: ubuntu-24.04 7 | tools: 8 | python: "3" 9 | jobs: 10 | install: 11 | - pip install --upgrade pip 12 | - pip install --group docs -e . 
13 | sphinx: 14 | configuration: docs/conf.py 15 | builder: "dirhtml" 16 | # fail_on_warning: true 17 | -------------------------------------------------------------------------------- /examples/08-events/example.py: -------------------------------------------------------------------------------- 1 | import kopf 2 | 3 | 4 | @kopf.on.event('kopfexamples') 5 | def event_fn_with_error(**kwargs): 6 | raise Exception("Oops!") 7 | 8 | 9 | @kopf.on.event('kopfexamples') 10 | def normal_event_fn(event, **kwargs): 11 | print(f"Event received: {event!r}") 12 | 13 | 14 | # Marks for the e2e tests (see tests/e2e/test_examples.py): 15 | E2E_ALLOW_TRACEBACKS = True 16 | E2E_SUCCESS_COUNTS = {'normal_event_fn': 2} 17 | -------------------------------------------------------------------------------- /tests/registries/test_default_registry.py: -------------------------------------------------------------------------------- 1 | import kopf 2 | 3 | 4 | def test_getting_default_registry(): 5 | registry = kopf.get_default_registry() 6 | assert isinstance(registry, kopf.OperatorRegistry) 7 | 8 | 9 | def test_setting_default_registry(): 10 | registry_expected = kopf.OperatorRegistry() 11 | kopf.set_default_registry(registry_expected) 12 | registry_actual = kopf.get_default_registry() 13 | assert registry_actual is registry_expected 14 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 16 | -------------------------------------------------------------------------------- /docs/deployment-depl.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: kopfexample-operator 5 | spec: 6 | replicas: 1 7 | strategy: 8 | type: Recreate 9 | selector: 10 | matchLabels: 11 | application: kopfexample-operator 12 | template: 13 | metadata: 14 | labels: 15 | 
application: kopfexample-operator 16 | spec: 17 | serviceAccountName: kopfexample-account 18 | containers: 19 | - name: the-only-one 20 | image: nolar/kopf-operator 21 | -------------------------------------------------------------------------------- /docs/naming.rst: -------------------------------------------------------------------------------- 1 | ====== 2 | Naming 3 | ====== 4 | 5 | Kopf is an abbreviation either for 6 | **K**\ubernetes **O**\perator **P**\ythonic **F**\ramework, or for 7 | **K**\ubernetes **OP**\erator **F**\ramework --- whatever you like more. 8 | 9 | "Kopf" also means "head" in German. 10 | 11 | It is capitalised in natural language texts: 12 | 13 | *I like using Kopf to manage my domain in Kubernetes.* 14 | 15 | It is lower-cased in all system and code references:: 16 | 17 | pip install kopf 18 | import kopf 19 | -------------------------------------------------------------------------------- /tests/test_absent_modules.py: -------------------------------------------------------------------------------- 1 | """ 2 | Verify that the module-prohibiting fixtures do work as expected. 3 | Otherwise, the tests are useless or can show false-positives. 
4 | """ 5 | import pytest 6 | 7 | 8 | @pytest.mark.usefixtures('no_kubernetes') 9 | def test_client_uninstalled_has_effect(): 10 | with pytest.raises(ImportError): 11 | import kubernetes 12 | 13 | 14 | @pytest.mark.usefixtures('no_pykube') 15 | def test_pykube_uninstalled_has_effect(): 16 | with pytest.raises(ImportError): 17 | import pykube 18 | -------------------------------------------------------------------------------- /tests/utilities/aiotasks/test_task_waiting.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from kopf._cogs.aiokits.aiotasks import wait 4 | 5 | 6 | async def test_wait_with_no_tasks(): 7 | done, pending = await wait([]) 8 | assert not done 9 | assert not pending 10 | 11 | 12 | async def test_wait_with_timeout(): 13 | flag = asyncio.Event() 14 | task = asyncio.create_task(flag.wait()) 15 | done, pending = await wait([task], timeout=0.01) 16 | assert not done 17 | assert pending == {task} 18 | flag.set() 19 | await task 20 | -------------------------------------------------------------------------------- /tests/apis/test_default_namespace.py: -------------------------------------------------------------------------------- 1 | from kopf._cogs.clients.api import get_default_namespace 2 | 3 | 4 | async def test_default_namespace_when_unset(mocker, enforced_context): 5 | mocker.patch.object(enforced_context, 'default_namespace', None) 6 | ns = await get_default_namespace() 7 | assert ns is None 8 | 9 | 10 | async def test_default_namespace_when_set(mocker, enforced_context): 11 | mocker.patch.object(enforced_context, 'default_namespace', 'xyz') 12 | ns = await get_default_namespace() 13 | assert ns == 'xyz' 14 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | 5 | # Distribution / 
packaging 6 | build/ 7 | dist/ 8 | .eggs/ 9 | *.egg-info/ 10 | *.egg 11 | 12 | # Unit test / coverage reports 13 | htmlcov/ 14 | .tox/ 15 | .coverage 16 | .coverage.* 17 | .cache 18 | .pytest_cache 19 | nosetests.xml 20 | coverage.xml 21 | junit.xml 22 | *.cover 23 | .hypothesis/ 24 | .mypy_cache 25 | 26 | # Documentation 27 | docs/_build 28 | docs/packages 29 | 30 | # VirtualEnv 31 | env 32 | 33 | # VSCode 34 | .vscode 35 | 36 | # Idea / PyCharm 37 | .idea 38 | -------------------------------------------------------------------------------- /kopf/_cogs/structs/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | All the functions to manipulate the resource fields, state changes, etc. 3 | 4 | Grouped by the type of the fields and the purpose of the manipulation. 5 | 6 | Used in the handling routines to check if there were significant changes at all 7 | (i.e. not our own internal and system changes, like the uids, links, etc), 8 | and to get the exact per-field diffs for the specific handler functions. 9 | 10 | All the functions are purely data-manipulative and computational. 11 | No external calls or any i/o activities are done here. 12 | """ 13 | -------------------------------------------------------------------------------- /kopf/_kits/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Toolkits to improve the developer experience in the context of Kopf. 3 | 4 | They are not needed to use the framework or to run the operator 5 | (unlike all other packages), but they can make the development 6 | of the operators much easier. 7 | 8 | Some things can be considered as the clients' responsibilities 9 | rather than the operator framework's responsibilities. 10 | In that case, the decision point is whether the functions work 11 | "in the context of Kopf" at least to some extent 12 | (e.g. by using its contextual information of the current handler). 
13 | """ 14 | -------------------------------------------------------------------------------- /tools/install-kubectl.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Install kubectl. 3 | # 4 | # Use the latest client version always, ignore the requested K8s version. 5 | # Kubectl is not a system-under-tests, it is a environment configuring tool. 6 | # 7 | set -eu 8 | set -x 9 | 10 | : ${K8S:=latest} 11 | 12 | if [[ "$K8S" == latest ]] ; then 13 | K8S="$( curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt )" 14 | fi 15 | 16 | curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/"$K8S"/bin/linux/amd64/kubectl 17 | chmod +x kubectl 18 | sudo mv kubectl /usr/local/bin/ 19 | -------------------------------------------------------------------------------- /tests/posting/conftest.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | import pytest 4 | 5 | from kopf._core.engines.posting import event_queue_loop_var, event_queue_var 6 | 7 | 8 | @pytest.fixture() 9 | def event_queue_loop(loop): # must be sync-def 10 | token = event_queue_loop_var.set(loop) 11 | try: 12 | yield loop 13 | finally: 14 | event_queue_loop_var.reset(token) 15 | 16 | 17 | @pytest.fixture() 18 | def event_queue(): 19 | queue = asyncio.Queue() 20 | token = event_queue_var.set(queue) 21 | try: 22 | yield queue 23 | finally: 24 | event_queue_var.reset(token) 25 | -------------------------------------------------------------------------------- /tests/utilities/aiotasks/test_task_selection.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from kopf._cogs.aiokits.aiotasks import all_tasks 4 | 5 | 6 | async def test_alltasks_exclusion(): 7 | flag = asyncio.Event() 8 | task1 = asyncio.create_task(flag.wait()) 9 | task2 = asyncio.create_task(flag.wait()) 10 | done, pending = await 
asyncio.wait([task1, task2], timeout=0.01) 11 | assert not done 12 | 13 | tasks = await all_tasks(ignored=[task2]) 14 | assert task1 in tasks 15 | assert task2 not in tasks 16 | assert asyncio.current_task() not in tasks 17 | 18 | flag.set() 19 | await task1 20 | await task2 21 | -------------------------------------------------------------------------------- /tests/registries/test_creation.py: -------------------------------------------------------------------------------- 1 | from kopf._core.intents.registries import ActivityRegistry, OperatorRegistry, ResourceRegistry 2 | 3 | 4 | def test_activity_registry(activity_registry_cls): 5 | registry = activity_registry_cls() 6 | assert isinstance(registry, ActivityRegistry) 7 | 8 | 9 | def test_resource_registry(resource_registry_cls): 10 | registry = resource_registry_cls() 11 | assert isinstance(registry, ResourceRegistry) 12 | 13 | 14 | def test_operator_registry(operator_registry_cls): 15 | registry = operator_registry_cls() 16 | assert isinstance(registry, OperatorRegistry) 17 | assert not isinstance(registry, ResourceRegistry) 18 | -------------------------------------------------------------------------------- /kopf/_cogs/aiokits/aiobindings.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | 4 | async def condition_chain( 5 | source: asyncio.Condition, 6 | target: asyncio.Condition, 7 | ) -> None: 8 | """ 9 | A condition chain is a "clean" hack to attach one condition to another. 10 | 11 | It is a "clean" (not "dirty") hack to wake up the webhook configuration 12 | managers when either the resources are revised (as seen in the insights), 13 | or a new client config is yielded from the webhook server. 
14 | """ 15 | async with source: 16 | while True: 17 | await source.wait() 18 | async with target: 19 | target.notify_all() 20 | -------------------------------------------------------------------------------- /examples/04-events/example.py: -------------------------------------------------------------------------------- 1 | """ 2 | Send the custom events for the handled or other objects. 3 | """ 4 | import kopf 5 | 6 | 7 | @kopf.on.create('kopfexamples') 8 | def create_fn(body, **kwargs): 9 | 10 | # The all-purpose function for the event creation. 11 | kopf.event(body, type="SomeType", reason="SomeReason", message="Some message") 12 | 13 | # The shortcuts for the conventional events and common cases. 14 | kopf.info(body, reason="SomeReason", message="Some message") 15 | kopf.warn(body, reason="SomeReason", message="Some message") 16 | 17 | try: 18 | raise RuntimeError("Exception text.") 19 | except Exception: 20 | kopf.exception(body, reason="SomeReason", message="Some exception:") 21 | -------------------------------------------------------------------------------- /.github/workflows/publish.yaml: -------------------------------------------------------------------------------- 1 | name: Publish to PyPI 2 | on: 3 | release: 4 | types: 5 | - published 6 | # push: 7 | # tags: 8 | # - "[0-9]+.[0-9]+*" 9 | workflow_dispatch: {} 10 | 11 | jobs: 12 | publish: 13 | name: Build and publish 14 | runs-on: ubuntu-24.04 15 | permissions: 16 | id-token: write # for trusted publishing 17 | steps: 18 | - uses: actions/checkout@v5 19 | - uses: actions/setup-python@v6 20 | with: 21 | python-version: "3.14" 22 | - run: pip install --upgrade pip build 23 | - run: python -m build # includes sdist & wheel by default 24 | - uses: pypa/gh-action-pypi-publish@release/v1 25 | with: 26 | skip_existing: true 27 | -------------------------------------------------------------------------------- /tests/logging/conftest.py: 
-------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | import pytest 4 | 5 | from kopf._core.engines.posting import event_queue_loop_var, event_queue_var 6 | 7 | 8 | @pytest.fixture(autouse=True) 9 | def _caplog_all_levels(caplog): 10 | caplog.set_level(0) 11 | 12 | 13 | @pytest.fixture(autouse=True) 14 | def event_queue_loop(loop): # must be sync-def 15 | token = event_queue_loop_var.set(loop) 16 | try: 17 | yield loop 18 | finally: 19 | event_queue_loop_var.reset(token) 20 | 21 | 22 | @pytest.fixture(autouse=True) 23 | def event_queue(): 24 | queue = asyncio.Queue() 25 | token = event_queue_var.set(queue) 26 | try: 27 | yield queue 28 | finally: 29 | event_queue_var.reset(token) 30 | -------------------------------------------------------------------------------- /tests/handling/indexing/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from kopf._cogs.structs.bodies import Body 4 | from kopf._core.engines.indexing import OperatorIndexer, OperatorIndexers 5 | 6 | 7 | @pytest.fixture() 8 | def indexers(): 9 | return OperatorIndexers() 10 | 11 | 12 | @pytest.fixture() 13 | def index(indexers): 14 | indexer = OperatorIndexer() 15 | indexers['index_fn'] = indexer 16 | return indexer.index 17 | 18 | 19 | @pytest.fixture() 20 | async def indexed_123(indexers, index, namespace): 21 | body = {'metadata': {'namespace': namespace, 'name': 'name1'}} 22 | key = indexers.make_key(Body(body)) 23 | indexers['index_fn'].replace(key, 123) 24 | assert set(index) == {None} 25 | assert set(index[None]) == {123} 26 | -------------------------------------------------------------------------------- /tests/lifecycles/test_global_defaults.py: -------------------------------------------------------------------------------- 1 | import kopf 2 | 3 | 4 | def test_getting_default_lifecycle(): 5 | lifecycle = kopf.get_default_lifecycle() 6 | assert lifecycle is 
def test_resetting_default_lifecycle():
    """Setting the lifecycle to None must restore the built-in default (asap)."""

    def distracting_lifecycle(handlers, *args, **kwargs):
        return handlers

    kopf.set_default_lifecycle(distracting_lifecycle)
    kopf.set_default_lifecycle(None)
    assert kopf.get_default_lifecycle() is kopf.lifecycles.asap
8 | # 9 | # Not all of the latest K8s versions are available as the Kind versions. 10 | # Care should be taken when upgrading. Check the available versions at: 11 | # https://hub.docker.com/r/kindest/node/tags 12 | # 13 | set -eu 14 | set -x 15 | 16 | : ${KIND:=latest} 17 | : ${K8S:=latest} 18 | if [[ "$K8S" == latest ]] ; then 19 | K8S="$( curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt )" 20 | fi 21 | 22 | curl -Lo ./kind https://kind.sigs.k8s.io/dl/"$KIND"/kind-linux-amd64 23 | chmod +x ./kind 24 | sudo mv ./kind /usr/local/bin/ 25 | 26 | kind create cluster --image=kindest/node:"$K8S" 27 | -------------------------------------------------------------------------------- /kopf/_cogs/helpers/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | General-purpose helpers not related to the framework itself 3 | (neither to the reactor nor to the engines nor to the structs), 4 | which are used to prepare and control the runtime environment. 5 | 6 | These are things that should better be in the standard library 7 | or in the dependencies. 8 | 9 | Utilities do not depend on anything in the framework. For most cases, 10 | they do not even implement any entities or behaviours of the domain 11 | of K8s Operators, but rather some unrelated low-level patterns. 12 | 13 | As a rule of thumb, helpers MUST be abstracted from the framework 14 | to such an extent that they could be extracted as reusable libraries. 15 | If they implement concepts of the framework, they are not "helpers" 16 | (consider making them _kits, structs, engines, or the reactor parts). 
17 | """ 18 | -------------------------------------------------------------------------------- /tests/test_thirdparty.py: -------------------------------------------------------------------------------- 1 | import types 2 | 3 | import pytest 4 | 5 | from kopf._cogs.helpers.thirdparty import KubernetesModel 6 | 7 | 8 | @pytest.mark.parametrize('name', ['V1Pod', 'V1ObjectMeta', 'V1PodSpec', 'V1PodTemplateSpec']) 9 | def test_kubernetes_model_classes_detection(kubernetes, name): 10 | cls = getattr(kubernetes.client, name) 11 | assert issubclass(cls, KubernetesModel) 12 | 13 | 14 | @pytest.mark.parametrize('name', ['CoreV1Api', 'ApiClient', 'Configuration']) 15 | def test_kubernetes_other_classes_detection(kubernetes, name): 16 | cls = getattr(kubernetes.client, name) 17 | assert not issubclass(cls, KubernetesModel) 18 | 19 | 20 | @pytest.mark.parametrize('cls', [object, types.SimpleNamespace]) 21 | def test_non_kubernetes_classes_detection(kubernetes, cls): 22 | assert not issubclass(cls, KubernetesModel) 23 | -------------------------------------------------------------------------------- /kopf/_core/intents/filters.py: -------------------------------------------------------------------------------- 1 | import enum 2 | from collections.abc import Mapping 3 | from typing import Any, TypeAlias 4 | 5 | from kopf._core.intents import callbacks 6 | 7 | 8 | class MetaFilterToken(enum.Enum): 9 | """ Tokens for filtering by annotations/labels. """ 10 | PRESENT = enum.auto() 11 | ABSENT = enum.auto() 12 | 13 | 14 | # For exporting to the top-level package. 15 | ABSENT = MetaFilterToken.ABSENT 16 | PRESENT = MetaFilterToken.PRESENT 17 | 18 | # Filters for handler specifications (not the same as the object's values). 19 | MetaFilter = Mapping[str, str | MetaFilterToken | callbacks.MetaFilterFn] 20 | 21 | # Filters for old/new values of a field. 22 | # NB: `Any` covers all other values, but we want to highlight that they are specially treated. 
23 | ValueFilter: TypeAlias = Any | MetaFilterToken | callbacks.MetaFilterFn | None 24 | -------------------------------------------------------------------------------- /tests/registries/test_subhandlers_ids.py: -------------------------------------------------------------------------------- 1 | import kopf 2 | from kopf._core.actions.execution import handler_var 3 | from kopf._core.actions.invocation import context 4 | from kopf._core.reactor.subhandling import subregistry_var 5 | 6 | 7 | # Used in the tests. Must be global-scoped, or its qualname will be affected. 8 | def child_fn(**_): 9 | pass 10 | 11 | 12 | def test_with_parent( 13 | parent_handler, resource_registry_cls, cause_factory): 14 | 15 | cause = cause_factory(resource_registry_cls) 16 | registry = resource_registry_cls() 17 | 18 | with context([(handler_var, parent_handler), (subregistry_var, registry)]): 19 | kopf.subhandler()(child_fn) 20 | 21 | handlers = registry.get_handlers(cause) 22 | assert len(handlers) == 1 23 | assert handlers[0].fn is child_fn 24 | assert handlers[0].id == 'parent_fn/child_fn' 25 | -------------------------------------------------------------------------------- /tests/cli/test_help.py: -------------------------------------------------------------------------------- 1 | def test_help_in_root(invoke, mocker): 2 | result = invoke(['--help']) 3 | 4 | assert result.exit_code == 0 5 | assert 'Usage: kopf [OPTIONS]' in result.output 6 | assert ' run ' in result.output 7 | assert ' freeze ' in result.output 8 | assert ' resume ' in result.output 9 | 10 | 11 | def test_help_in_subcommand(invoke, mocker): 12 | preload = mocker.patch('kopf._cogs.helpers.loaders.preload') 13 | real_run = mocker.patch('kopf._core.reactor.running.run') 14 | 15 | result = invoke(['run', '--help']) 16 | 17 | assert result.exit_code == 0 18 | assert not preload.called 19 | assert not real_run.called 20 | 21 | # Enough to be sure this is not a root command help. 
Kopf requires the source files with the handlers on the command line.
It does not make any attempts to guess the user's intentions
or to introduce any conventions (at least, for now).
7 | 8 | There are two ways to specify them (both mimicking the Python interpreter): 9 | 10 | * Direct script files:: 11 | 12 | kopf run file1.py file2.py 13 | 14 | * Importable modules:: 15 | 16 | kopf run -m package1.module1 -m package2.module2 17 | 18 | * Or mixed:: 19 | 20 | kopf run file1.py file2.py -m package1.module1 -m package2.module2 21 | 22 | Which way to use depends on how the source code is structured, 23 | and is out of the scope of Kopf. 24 | 25 | Each of the mentioned files and modules will be imported. 26 | The handlers should be registered during the import. 27 | This is usually done by using the function decorators --- see :doc:`/handlers`. 28 | -------------------------------------------------------------------------------- /docs/walkthrough/prerequisites.rst: -------------------------------------------------------------------------------- 1 | ================= 2 | Environment Setup 3 | ================= 4 | 5 | We need a running Kubernetes cluster and some tools for our experiments. 6 | If you have a cluster already preconfigured, you can skip this section. 7 | Otherwise, let's install minikube locally (e.g. for MacOS): 8 | 9 | * Python >= 3.10 (running in a venv is recommended, though is not necessary). 10 | * `Install kubectl `_ 11 | * :doc:`Install minikube ` (a local Kubernetes cluster) 12 | * :doc:`Install Kopf ` 13 | 14 | .. warning:: 15 | Unfortunately, Minikube cannot handle the PVC/PV resizing, 16 | as it uses the HostPath provider internally. 17 | You can either skip the :doc:`updates` step of this tutorial 18 | (where the sizes of the volumes are changed), 19 | or you can use an external Kubernetes cluster 20 | with real dynamically sized volumes. 21 | -------------------------------------------------------------------------------- /kopf/_cogs/clients/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | All the routines to talk to Kubernetes API and other APIs. 
"""Demonstrate the basic per-cause handlers: resume, create, update, delete, field."""
import kopf


@kopf.on.resume('kopfexamples')
def resume_fn_1(**kwargs):
    # Fixed: the string had a needless f-prefix with no placeholders (ruff F541).
    print('RESUMED 1st')


@kopf.on.create('kopfexamples')
def create_fn_1(**kwargs):
    print('CREATED 1st')


@kopf.on.resume('kopfexamples')
def resume_fn_2(**kwargs):
    # Fixed: needless f-prefix removed here as well (ruff F541).
    print('RESUMED 2nd')


@kopf.on.create('kopfexamples')
def create_fn_2(**kwargs):
    print('CREATED 2nd')


@kopf.on.update('kopfexamples')
def update_fn(old, new, diff, **kwargs):
    print('UPDATED')


@kopf.on.delete('kopfexamples')
def delete_fn_1(**kwargs):
    print('DELETED 1st')


@kopf.on.delete('kopfexamples')
def delete_fn_2(**kwargs):
    print('DELETED 2nd')


@kopf.on.field('kopfexamples', field='spec.field')
def field_fn(old, new, **kwargs):
    # This f-string is legitimate: it interpolates the old/new field values.
    print(f'FIELD CHANGED: {old} -> {new}')
-------------------------------------------------------------------------------- /docs/troubleshooting.rst: -------------------------------------------------------------------------------- 1 | =============== 2 | Troubleshooting 3 | =============== 4 | 5 | .. _finalizers-blocking-deletion: 6 | 7 | ``kubectl`` freezes on object deletion 8 | ====================================== 9 | 10 | This can happen if the operator is down at the moment of deletion. 11 | 12 | The operator puts the finalizers on the objects as soon as it notices 13 | them for the first time. When the objects are *requested for deletion*, 14 | Kopf calls the deletion handlers and removes the finalizers, 15 | thus releasing the object for the *actual deletion* by Kubernetes. 16 | 17 | If the object has to be deleted without the operator starting again, 18 | you can remove the finalizers manually: 19 | 20 | .. code-block:: bash 21 | 22 | kubectl patch kopfexample kopf-example-1 -p '{"metadata": {"finalizers": []}}' --type merge 23 | 24 | The object will be removed by Kubernetes immediately. 25 | 26 | Alternatively, restart the operator, and allow it to remove the finalizers. 
27 | -------------------------------------------------------------------------------- /tests/registries/test_matching_of_resources.py: -------------------------------------------------------------------------------- 1 | from unittest.mock import Mock 2 | 3 | from kopf._cogs.structs.references import Resource, Selector 4 | from kopf._core.intents.registries import _matches_resource 5 | 6 | 7 | def test_different_resource(): 8 | selector = Selector('group1', 'version1', 'plural1') 9 | resource = Resource('group2', 'version2', 'plural2') 10 | handler = Mock(selector=selector) 11 | matches = _matches_resource(handler, resource) 12 | assert not matches 13 | 14 | 15 | def test_equivalent_resources(): 16 | selector = Selector('group1', 'version1', 'plural1') 17 | resource = Resource('group1', 'version1', 'plural1') 18 | handler = Mock(selector=selector) 19 | matches = _matches_resource(handler, resource) 20 | assert matches 21 | 22 | 23 | def test_catchall_with_none(): 24 | resource = Resource('group2', 'version2', 'plural2') 25 | handler = Mock(selector=None) 26 | matches = _matches_resource(handler, resource) 27 | assert matches 28 | -------------------------------------------------------------------------------- /tests/reactor/test_uids.py: -------------------------------------------------------------------------------- 1 | from kopf._core.reactor.queueing import get_uid 2 | 3 | 4 | def test_uid_is_used_if_present(): 5 | raw_event = {'type': ..., 'object': {'metadata': {'uid': '123'}}} 6 | uid = get_uid(raw_event) 7 | 8 | assert isinstance(uid, str) 9 | assert uid == '123' 10 | 11 | 12 | def test_uid_is_simulated_if_absent(): 13 | raw_event = {'type': ..., 14 | 'object': { 15 | 'apiVersion': 'group/v1', 16 | 'kind': 'Kind1', 17 | 'metadata': { 18 | 'name': 'name1', 19 | 'namespace': 'namespace1', 20 | 'creationTimestamp': 'created1', 21 | }}} 22 | uid = get_uid(raw_event) 23 | 24 | # The exact order is irrelevant. 
      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v4
        with:
          category: "/language:${{ matrix.language }}"
11 | """ 12 | import logging 13 | from typing import TYPE_CHECKING, Any, TypeAlias 14 | 15 | if TYPE_CHECKING: 16 | LoggerAdapter: TypeAlias = logging.LoggerAdapter[Any] 17 | else: 18 | LoggerAdapter: TypeAlias = logging.LoggerAdapter 19 | 20 | # As publicly exposed: we only promise that it is based on one of the built-in loggable classes. 21 | # Mind that these classes have multi-versioned stubs, so we avoid redefining the protocol ourselves. 22 | Logger: TypeAlias = logging.Logger | LoggerAdapter 23 | -------------------------------------------------------------------------------- /docs/minikube.rst: -------------------------------------------------------------------------------- 1 | ======== 2 | Minikube 3 | ======== 4 | 5 | .. highlight:: bash 6 | 7 | To develop the framework and the operators in an isolated Kubernetes cluster, 8 | use minikube_. 9 | 10 | .. _minikube: https://github.com/kubernetes/minikube 11 | 12 | MacOS:: 13 | 14 | brew install minikube 15 | brew install hyperkit 16 | 17 | minikube start --driver=hyperkit 18 | minikube config set driver hyperkit 19 | 20 | Start the minikube cluster:: 21 | 22 | minikube start 23 | minikube dashboard 24 | 25 | It automatically creates and activates the kubectl context named ``minikube``. 26 | If not, or if you have multiple clusters, activate it explicitly:: 27 | 28 | kubectl config get-contexts 29 | kubectl config current-context 30 | kubectl config use-context minikube 31 | 32 | For the minikube cleanup (to release the CPU/RAM/disk resources):: 33 | 34 | minikube stop 35 | minikube delete 36 | 37 | .. seealso:: 38 | For even more information, read the `Minikube installation manual`__. 
import pytest

from kopf._cogs.structs.dicts import parse_field


def test_from_none():
    """None is treated as an empty field path."""
    result = parse_field(None)
    assert isinstance(result, tuple)
    assert result == ()


def test_from_string_one_level():
    result = parse_field('field')
    assert isinstance(result, tuple)
    assert result == ('field',)


def test_from_string_two_levels():
    result = parse_field('field.subfield')
    assert isinstance(result, tuple)
    assert result == ('field', 'subfield')


def test_from_list():
    result = parse_field(['field', 'subfield'])
    assert isinstance(result, tuple)
    assert result == ('field', 'subfield')


def test_from_tuple():
    result = parse_field(('field', 'subfield'))
    assert isinstance(result, tuple)
    assert result == ('field', 'subfield')


@pytest.mark.parametrize('val', [dict(), set(), frozenset()])
def test_from_others_fails(val):
    """Unordered collections cannot express a path and must be rejected."""
    with pytest.raises(ValueError):
        parse_field(val)
11 | # 12 | set -eu 13 | set -x 14 | 15 | : ${K8S:=latest} 16 | if [[ "$K8S" == latest ]] ; then 17 | K8S="$( curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt )" 18 | fi 19 | 20 | curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 21 | chmod +x minikube 22 | sudo mv minikube /usr/local/bin/ 23 | 24 | mkdir -p $HOME/.kube $HOME/.minikube 25 | touch $HOME/.kube/config 26 | 27 | sudo apt-get update -y 28 | sudo apt-get install -y conntrack # see #334 29 | 30 | minikube config set driver docker 31 | minikube start \ 32 | --extra-config=apiserver.authorization-mode=Node,RBAC \ 33 | --extra-config=apiserver.runtime-config=events.k8s.io/v1beta1=false \ 34 | --kubernetes-version="$K8S" 35 | -------------------------------------------------------------------------------- /CONTRIBUTORS.md: -------------------------------------------------------------------------------- 1 | # Project Contributors 2 | 3 | All external contributors to the project, we are grateful for all their help. 4 | 5 | For the detailed information on who did what, 6 | see [GitHub Contributors](https://github.com/nolar/kopf/graphs/contributors) 7 | and [Historic GitHub Contributors](https://github.com/zalando-incubator/kopf/graphs/contributors). 
class CustomIterable:
    """A minimal iterable that is neither a list nor a tuple.

    Used to verify that the code under test accepts arbitrary iterables,
    not only the built-in sequence types.
    """

    def __init__(self, objs):
        self._items = objs

    def __iter__(self):
        return iter(self._items)
34 | obj = kubernetes.client.V1CronJob( 35 | metadata=kubernetes.client.V1ObjectMeta(), 36 | spec=kubernetes.client.V1CronJobSpec( 37 | schedule='* * * * *', 38 | job_template=kubernetes.client.V1JobTemplateSpec(), 39 | ), 40 | ) 41 | return obj 42 | -------------------------------------------------------------------------------- /examples/10-builtins/README.md: -------------------------------------------------------------------------------- 1 | # Kopf example for built-in resources 2 | 3 | Kopf can also handle the built-in resources, such as Pods, Jobs, etc. 4 | 5 | In this example, we take control all over the pods (namespaced/cluster-wide), 6 | and allow the pods to exist for no longer than 30 seconds -- 7 | either after creation or after the operator restart. 8 | 9 | For no specific reason, just for fun. Maybe, as a way of Chaos Engineering 10 | to force making the resilient applications (tolerant to pod killing). 11 | 12 | However, the system namespaces (kube-system, etc) are explicitly protected -- 13 | to prevent killing the cluster itself. 14 | 15 | Start the operator: 16 | 17 | ```bash 18 | kopf run example.py --verbose 19 | ``` 20 | 21 | Start a sample pod: 22 | 23 | ```bash 24 | kubectl run -it --image=ubuntu expr1 -- bash -i 25 | # wait for 30s 26 | ``` 27 | 28 | Since `kubectl run` creates a Deployment, not just a Pod, 29 | a new pod will be created every 30 seconds. 
Observe with: 30 | 31 | ```bash 32 | kubectl get pods --watch 33 | ``` 34 | 35 | Cleanup in the end: 36 | 37 | ```bash 38 | $ kubectl delete deployment expr1 39 | ``` 40 | -------------------------------------------------------------------------------- /tests/dicts/test_ensuring.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from kopf._cogs.structs.dicts import ensure 4 | 5 | 6 | def test_existing_key(): 7 | d = {'abc': {'def': {'hij': 'val'}}} 8 | ensure(d, ['abc', 'def', 'hij'], 'new') 9 | assert d == {'abc': {'def': {'hij': 'new'}}} 10 | 11 | 12 | def test_unexisting_key_in_existing_dict(): 13 | d = {'abc': {'def': {}}} 14 | ensure(d, ['abc', 'def', 'hij'], 'new') 15 | assert d == {'abc': {'def': {'hij': 'new'}}} 16 | 17 | 18 | def test_unexisting_key_in_unexisting_dict(): 19 | d = {} 20 | ensure(d, ['abc', 'def', 'hij'], 'new') 21 | assert d == {'abc': {'def': {'hij': 'new'}}} 22 | 23 | 24 | def test_toplevel_key(): 25 | d = {'key': 'val'} 26 | ensure(d, ['key'], 'new') 27 | assert d == {'key': 'new'} 28 | 29 | 30 | def test_nonmapping_key(): 31 | d = {'key': 'val'} 32 | with pytest.raises(TypeError): 33 | ensure(d, ['key', 'sub'], 'new') 34 | 35 | 36 | def test_empty_path(): 37 | d = {} 38 | with pytest.raises(ValueError) as e: 39 | ensure(d, [], 'new') 40 | assert "Setting a root of a dict is impossible" in str(e.value) 41 | -------------------------------------------------------------------------------- /examples/03-exceptions/README.md: -------------------------------------------------------------------------------- 1 | # Kopf example with exceptions in the handler 2 | 3 | This example raises the exceptions in the handler, 4 | so that it is retried a few times until it succeeds. 
Observe how the exceptions are reported in the operator's log (stderr),
and also briefly reported as the events on the processed object:
37 | return {'children': [pod.metadata['uid']]} 38 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020-2025 Sergey Vasilyev 4 | Copyright (c) 2019-2020 Zalando SE 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | -------------------------------------------------------------------------------- /examples/02-children/README.md: -------------------------------------------------------------------------------- 1 | # Kopf example with children 2 | 3 | This example creates a `Pod` for every created `KopfExample` object, 4 | and attaches it as a child of that example object. The latter means that 5 | when the parent object is deleted, the child pod is also terminated. 
6 | 7 | Start the operator: 8 | 9 | ```bash 10 | kopf run example.py --verbose 11 | ``` 12 | 13 | The child pod's id is stored as the parent's status field, 14 | so that it can be seen on the object listing (see also `crd.yaml`): 15 | 16 | ```bash 17 | $ kubectl apply -f ../obj.yaml 18 | $ kubectl get kopfexamples 19 | NAME FIELD CHILDREN 20 | kopf-example-1 value [aed7f7ac-2971-11e9-b4d3-061441377794] 21 | 22 | $ kubectl get pod -l somelabel=somevalue 23 | NAME READY STATUS RESTARTS AGE 24 | kopf-example-1-jvlfs 1/1 Running 0 26s 25 | ``` 26 | 27 | ```bash 28 | $ kubectl delete -f ../obj.yaml 29 | $ kubectl get pod -l somelabel=somevalue 30 | NAME READY STATUS RESTARTS AGE 31 | kopf-example-1-jvlfs 1/1 Terminating 0 52s 32 | ``` 33 | 34 | Cleanup in the end: 35 | 36 | ```bash 37 | $ kubectl delete -f ../obj.yaml 38 | ``` 39 | -------------------------------------------------------------------------------- /kopf/_cogs/clients/creating.py: -------------------------------------------------------------------------------- 1 | from typing import cast 2 | 3 | from kopf._cogs.clients import api 4 | from kopf._cogs.configs import configuration 5 | from kopf._cogs.helpers import typedefs 6 | from kopf._cogs.structs import bodies, references 7 | 8 | 9 | async def create_obj( 10 | *, 11 | settings: configuration.OperatorSettings, 12 | resource: references.Resource, 13 | namespace: references.Namespace = None, 14 | name: str | None = None, 15 | body: bodies.RawBody | None = None, 16 | logger: typedefs.Logger, 17 | ) -> bodies.RawBody | None: 18 | """ 19 | Create a resource. 
20 | """ 21 | body = body if body is not None else {} 22 | if namespace is not None: 23 | body.setdefault('metadata', {}).setdefault('namespace', namespace) 24 | if name is not None: 25 | body.setdefault('metadata', {}).setdefault('name', name) 26 | 27 | namespace = cast(references.Namespace, body.get('metadata', {}).get('namespace')) 28 | created_body: bodies.RawBody = await api.post( 29 | url=resource.get_url(namespace=namespace), 30 | payload=body, 31 | logger=logger, 32 | settings=settings, 33 | ) 34 | return created_body 35 | -------------------------------------------------------------------------------- /docs/install.rst: -------------------------------------------------------------------------------- 1 | ============ 2 | Installation 3 | ============ 4 | 5 | .. highlight:: bash 6 | 7 | Prerequisites: 8 | 9 | * Python >= 3.10 (CPython and PyPy are officially tested and supported). 10 | 11 | To install Kopf:: 12 | 13 | pip install kopf 14 | 15 | If you use some of the managed Kubernetes services which require a sophisticated 16 | authentication beyond username+password, fixed tokens, or client SSL certs 17 | (also see :ref:`authentication piggy-backing `):: 18 | 19 | pip install kopf[full-auth] 20 | 21 | If you want extra i/o performance under the hood, install it as (also see :ref:`custom-event-loops`):: 22 | 23 | pip install kopf[uvloop] 24 | 25 | Unless you use the standalone mode, 26 | create a few Kopf-specific custom resources in the cluster:: 27 | 28 | kubectl apply -f https://github.com/nolar/kopf/raw/main/peering.yaml 29 | 30 | Optionally, if you are going to use the examples or the code snippets:: 31 | 32 | kubectl apply -f https://github.com/nolar/kopf/raw/main/examples/crd.yaml 33 | 34 | .. todo:: RBAC objects! 
kubectl apply -f rbac.yaml 35 | 36 | You are ready to go:: 37 | 38 | kopf --help 39 | kopf run --help 40 | kopf run examples/01-minimal/example.py 41 | -------------------------------------------------------------------------------- /tests/primitives/test_conditions.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | import pytest 4 | 5 | from kopf._cogs.aiokits.aiobindings import condition_chain 6 | 7 | 8 | async def test_no_triggering(): 9 | source = asyncio.Condition() 10 | target = asyncio.Condition() 11 | task = asyncio.create_task(condition_chain(source, target)) 12 | try: 13 | with pytest.raises(asyncio.TimeoutError): 14 | async with target: 15 | await asyncio.wait_for(target.wait(), timeout=0.1) 16 | finally: 17 | task.cancel() 18 | await asyncio.wait([task]) 19 | 20 | 21 | async def test_triggering(timer): 22 | source = asyncio.Condition() 23 | target = asyncio.Condition() 24 | task = asyncio.create_task(condition_chain(source, target)) 25 | try: 26 | 27 | async def delayed_trigger(): 28 | async with source: 29 | source.notify_all() 30 | 31 | loop = asyncio.get_running_loop() 32 | loop.call_later(0.1, asyncio.create_task, delayed_trigger()) 33 | 34 | with timer: 35 | async with target: 36 | await target.wait() 37 | 38 | assert 0.1 <= timer.seconds <= 0.2 39 | 40 | finally: 41 | task.cancel() 42 | await asyncio.wait([task]) 43 | -------------------------------------------------------------------------------- /tests/handling/daemons/test_daemon_rematching.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import kopf 4 | from kopf._core.intents.stoppers import DaemonStoppingReason 5 | 6 | 7 | async def test_running_daemon_is_stopped_when_mismatches( 8 | resource, dummy, timer, mocker, caplog, assert_logs, k8s_mocked, simulate_cycle): 9 | caplog.set_level(logging.DEBUG) 10 | 11 | @kopf.daemon(*resource, id='fn', when=lambda **_: 
is_matching) 12 | async def fn(**kwargs): 13 | dummy.mock() 14 | dummy.kwargs = kwargs 15 | dummy.steps['called'].set() 16 | await kwargs['stopped'].wait() 17 | 18 | # Ensure it is spawned while it is matching. (The same as the spawning tests.) 19 | mocker.resetall() 20 | is_matching = True 21 | await simulate_cycle({}) 22 | await dummy.steps['called'].wait() 23 | assert dummy.mock.call_count == 1 24 | 25 | # Ensure it is stopped once it stops matching. (The same as the termination tests.) 26 | mocker.resetall() 27 | is_matching = False 28 | await simulate_cycle({}) 29 | with timer: 30 | await dummy.wait_for_daemon_done() 31 | 32 | assert timer.seconds < 0.01 # near-instantly 33 | stopped = dummy.kwargs['stopped'] 34 | assert DaemonStoppingReason.FILTERS_MISMATCH in stopped.reason 35 | -------------------------------------------------------------------------------- /examples/04-events/README.md: -------------------------------------------------------------------------------- 1 | # Kopf example with the event reporting 2 | 3 | The framework reports some basic events on the handling progress. 4 | But the developers can report their own events conveniently. 5 | 6 | Start the operator: 7 | 8 | ```bash 9 | kopf run example.py --verbose 10 | ``` 11 | 12 | The events are shown on the object's description 13 | (and are usually garbage-collected after a few minutes). 14 | 15 | ```bash 16 | $ kubectl apply -f ../obj.yaml 17 | $ kubectl describe kopfexample kopf-example-1 18 | ... 19 | Events: 20 | Type Reason Age From Message 21 | ---- ------ ---- ---- ------- 22 | Normal SomeReason 5s kopf Some message 23 | Normal Success 5s kopf Handler create_fn succeeded. 24 | SomeType SomeReason 6s kopf Some message 25 | Normal Finished 5s kopf All handlers succeeded. 26 | Error SomeReason 5s kopf Some exception: Exception text. 
27 | Warning SomeReason 5s kopf Some message 28 | 29 | ``` 30 | 31 | Note that the events are shown out of any order -- this is a behaviour of the CLI tool or of the API. 32 | It has nothing to do with the framework: the framework reports the timestamps properly. 33 | 34 | Cleanup in the end: 35 | 36 | ```bash 37 | $ kubectl delete -f ../obj.yaml 38 | ``` 39 | -------------------------------------------------------------------------------- /tests/peering/test_resource_guessing.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from kopf._cogs.structs.references import CLUSTER_PEERINGS_K, CLUSTER_PEERINGS_Z, \ 4 | NAMESPACED_PEERINGS_K, NAMESPACED_PEERINGS_Z 5 | from kopf._core.engines.peering import guess_selectors 6 | 7 | 8 | @pytest.mark.parametrize('namespaced, expected_selectors', [ 9 | (False, [CLUSTER_PEERINGS_K, CLUSTER_PEERINGS_Z]), 10 | (True, [NAMESPACED_PEERINGS_K, NAMESPACED_PEERINGS_Z]), 11 | ]) 12 | @pytest.mark.parametrize('mandatory', [False, True]) 13 | def test_resource_when_not_standalone(settings, namespaced, mandatory, expected_selectors): 14 | settings.peering.standalone = False 15 | settings.peering.namespaced = namespaced 16 | settings.peering.mandatory = mandatory 17 | selectors = guess_selectors(settings=settings) 18 | assert selectors == expected_selectors 19 | 20 | 21 | @pytest.mark.parametrize('namespaced', [False, True]) 22 | @pytest.mark.parametrize('mandatory', [False, True]) 23 | def test_resource_when_standalone(settings, namespaced, mandatory): 24 | settings.peering.standalone = True 25 | settings.peering.namespaced = namespaced 26 | settings.peering.mandatory = mandatory 27 | selectors = guess_selectors(settings=settings) 28 | assert not selectors 29 | -------------------------------------------------------------------------------- /examples/03-exceptions/example.py: -------------------------------------------------------------------------------- 1 | import kopf 2 | 3 | 
4 | class MyException(Exception): 5 | pass 6 | 7 | 8 | @kopf.on.create('kopfexamples') 9 | def instant_failure_with_only_a_message(**kwargs): 10 | raise kopf.PermanentError("Fail once and for all.") 11 | 12 | 13 | @kopf.on.create('kopfexamples') 14 | def eventual_success_with_a_few_messages(retry, **kwargs): 15 | if retry < 3: # 0, 1, 2, 3 16 | raise kopf.TemporaryError("Expected recoverable error.", delay=1.0) 17 | 18 | 19 | @kopf.on.create('kopfexamples', retries=3, backoff=1.0) 20 | def eventual_failure_with_tracebacks(**kwargs): 21 | raise MyException("An error that is supposed to be recoverable.") 22 | 23 | 24 | @kopf.on.create('kopfexamples', errors=kopf.ErrorsMode.PERMANENT, backoff=1.0) 25 | def instant_failure_with_traceback(**kwargs): 26 | raise MyException("An error that is supposed to be recoverable.") 27 | 28 | 29 | # Marks for the e2e tests (see tests/e2e/test_examples.py): 30 | E2E_ALLOW_TRACEBACKS = True 31 | E2E_CREATION_STOP_WORDS = ['Something has changed,'] 32 | E2E_SUCCESS_COUNTS = {'eventual_success_with_a_few_messages': 1} 33 | E2E_FAILURE_COUNTS = {'eventual_failure_with_tracebacks': 1, 34 | 'instant_failure_with_traceback': 1, 35 | 'instant_failure_with_only_a_message': 1} 36 | -------------------------------------------------------------------------------- /tests/test_versions.py: -------------------------------------------------------------------------------- 1 | import json 2 | from typing import Any 3 | 4 | import pytest 5 | 6 | from kopf._cogs.clients.auth import APIContext, authenticated 7 | 8 | 9 | def test_package_version(): 10 | import kopf 11 | assert hasattr(kopf, '__version__') 12 | assert kopf.__version__ # not empty, not null 13 | 14 | 15 | @pytest.mark.parametrize('version, useragent', [ 16 | ('1.2.3', 'kopf/1.2.3'), 17 | ('1.2rc', 'kopf/1.2rc'), 18 | (None, 'kopf/unknown'), 19 | ]) 20 | async def test_http_user_agent_version( 21 | aresponses, hostname, fake_vault, mocker, version, useragent): 22 | 23 | 
mocker.patch('kopf._cogs.helpers.versions.version', version) 24 | 25 | @authenticated 26 | async def get_it(url: str, *, context: APIContext) -> dict[str, Any]: 27 | response = await context.session.get(url) 28 | return await response.json() 29 | 30 | async def responder(request): 31 | return aresponses.Response( 32 | content_type='application/json', 33 | text=json.dumps(dict(request.headers))) 34 | 35 | aresponses.add(hostname, '/', 'get', responder) 36 | returned_headers = await get_it(f"http://{hostname}/") 37 | assert returned_headers['User-Agent'] == useragent 38 | await fake_vault.close() # to prevent ResourceWarnings for unclosed connectors 39 | -------------------------------------------------------------------------------- /tests/admission/test_serving_ephemeral_memos.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from kopf._core.engines.admission import serve_admission_request 4 | 5 | 6 | @pytest.mark.parametrize('operation', ['CREATE']) 7 | async def test_memo_is_not_remembered_if_admission_is_for_creation( 8 | settings, registry, resource, memories, insights, indices, adm_request, operation): 9 | 10 | adm_request['request']['operation'] = operation 11 | await serve_admission_request( 12 | adm_request, 13 | settings=settings, registry=registry, insights=insights, 14 | memories=memories, memobase=object(), indices=indices, 15 | ) 16 | known_memories = list(memories.iter_all_memories()) 17 | assert not known_memories 18 | 19 | 20 | @pytest.mark.parametrize('operation', ['UPDATE', 'DELETE', 'CONNECT', '*WHATEVER*']) 21 | async def test_memo_is_remembered_if_admission_for_other_operations( 22 | settings, registry, resource, memories, insights, indices, adm_request, operation): 23 | 24 | adm_request['request']['operation'] = operation 25 | await serve_admission_request( 26 | adm_request, 27 | settings=settings, registry=registry, insights=insights, 28 | memories=memories, memobase=object(), 
indices=indices, 29 | ) 30 | known_memories = list(memories.iter_all_memories()) 31 | assert len(known_memories) == 1 32 | -------------------------------------------------------------------------------- /tests/handling/daemons/test_daemon_spawning.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import kopf 4 | 5 | 6 | async def test_daemon_is_spawned_at_least_once( 7 | resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle): 8 | caplog.set_level(logging.DEBUG) 9 | 10 | @kopf.daemon(*resource, id='fn') 11 | async def fn(**kwargs): 12 | dummy.mock() 13 | dummy.kwargs = kwargs 14 | dummy.steps['called'].set() 15 | 16 | await simulate_cycle({}) 17 | 18 | await dummy.steps['called'].wait() 19 | await dummy.wait_for_daemon_done() 20 | 21 | assert dummy.mock.call_count == 1 # not restarted 22 | 23 | 24 | async def test_daemon_initial_delay_obeyed( 25 | resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle): 26 | caplog.set_level(logging.DEBUG) 27 | 28 | @kopf.daemon(*resource, id='fn', initial_delay=1.0) 29 | async def fn(**kwargs): 30 | dummy.mock() 31 | dummy.kwargs = kwargs 32 | dummy.steps['called'].set() 33 | 34 | await simulate_cycle({}) 35 | 36 | await dummy.steps['called'].wait() 37 | await dummy.wait_for_daemon_done() 38 | 39 | assert k8s_mocked.sleep.call_count >= 1 40 | assert k8s_mocked.sleep.call_count <= 2 # one optional extra call for sleep(None) 41 | assert k8s_mocked.sleep.call_args_list[0][0][0] == 1.0 # [call#][args/kwargs][arg#] 42 | -------------------------------------------------------------------------------- /tests/hierarchies/test_type_validation.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | import kopf 4 | from kopf._cogs.structs.bodies import Body 5 | 6 | 7 | def test_in_owner_reference_appending(): 8 | with pytest.raises(TypeError) as e: 9 | kopf.append_owner_reference(object(), Body({})) 10 | 
assert "K8s object class is not supported" in str(e.value) 11 | 12 | 13 | def test_in_owner_reference_removal(): 14 | with pytest.raises(TypeError) as e: 15 | kopf.remove_owner_reference(object(), Body({})) 16 | assert "K8s object class is not supported" in str(e.value) 17 | 18 | 19 | def test_in_name_harmonization(): 20 | with pytest.raises(TypeError) as e: 21 | kopf.harmonize_naming(object(), 'x') 22 | assert "K8s object class is not supported" in str(e.value) 23 | 24 | 25 | def test_in_namepace_adjustment(): 26 | with pytest.raises(TypeError) as e: 27 | kopf.adjust_namespace(object(), 'x') 28 | assert "K8s object class is not supported" in str(e.value) 29 | 30 | 31 | def test_in_labelling(): 32 | with pytest.raises(TypeError) as e: 33 | kopf.label(object(), {}) 34 | assert "K8s object class is not supported" in str(e.value) 35 | 36 | 37 | def test_in_adopting(): 38 | with pytest.raises(TypeError) as e: 39 | kopf.adopt(object(), Body({})) 40 | assert "K8s object class is not supported" in str(e.value) 41 | -------------------------------------------------------------------------------- /tests/dicts/test_removing.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from kopf._cogs.structs.dicts import remove 4 | 5 | 6 | def test_existing_key(): 7 | d = {'abc': {'def': {'hij': 'val', 'hello': 'world'}}} 8 | remove(d, ['abc', 'def', 'hij']) 9 | assert d == {'abc': {'def': {'hello': 'world'}}} 10 | 11 | 12 | def test_unexisting_key_in_existing_dict(): 13 | d = {'abc': {'def': {'hello': 'world'}}} 14 | remove(d, ['abc', 'def', 'hij']) 15 | assert d == {'abc': {'def': {'hello': 'world'}}} 16 | 17 | 18 | def test_unexisting_key_in_unexisting_dict(): 19 | d = {} 20 | remove(d, ['abc', 'def', 'hij']) 21 | assert d == {} 22 | 23 | 24 | def test_parent_cascaded_deletion_up_to_the_root(): 25 | d = {'abc': {'def': {'hij': 'val'}}} 26 | remove(d, ['abc', 'def', 'hij']) 27 | assert d == {} 28 | 29 | 30 | def 
test_parent_cascaded_deletion_up_to_a_middle(): 31 | d = {'abc': {'def': {'hij': 'val'}, 'hello': 'world'}} 32 | remove(d, ['abc', 'def', 'hij']) 33 | assert d == {'abc': {'hello': 'world'}} 34 | 35 | 36 | def test_nonmapping_key(): 37 | d = {'key': 'val'} 38 | with pytest.raises(TypeError): 39 | remove(d, ['key', 'sub']) 40 | 41 | 42 | def test_empty_path(): 43 | d = {} 44 | with pytest.raises(ValueError) as e: 45 | remove(d, []) 46 | assert "Removing a root of a dict is impossible" in str(e.value) 47 | -------------------------------------------------------------------------------- /docs/startup.rst: -------------------------------------------------------------------------------- 1 | ======= 2 | Startup 3 | ======= 4 | 5 | The startup handlers are slightly different from the module-level code: 6 | the actual tasks (e.g. API calls for resource watching) are not started 7 | until all the startup handlers succeed. 8 | 9 | The handlers run inside of the operator's event loop, so they can initialise 10 | the loop-bound variables -- which is impossible in the module-level code:: 11 | 12 | import asyncio 13 | import kopf 14 | 15 | LOCK: asyncio.Lock 16 | 17 | @kopf.on.startup() 18 | async def startup_fn(logger, **kwargs): 19 | global LOCK 20 | LOCK = asyncio.Lock() # uses the running asyncio loop by default 21 | 22 | If any of the startup handlers fail, the operator fails to start 23 | without making any external API calls. 24 | 25 | .. note:: 26 | 27 | If the operator is running in a Kubernetes cluster, there can be 28 | timeouts set for liveness/readiness checks of a pod. 29 | 30 | If the startup takes too long in total (e.g. due to retries), 31 | the pod can be killed by Kubernetes as not responding to the probes. 32 | 33 | Either design the startup activities to be as fast as possible, 34 | or configure the liveness/readiness probes accordingly. 
35 | 36 | Kopf itself does not set any implicit timeouts for the startup activity, 37 | and it can continue forever (unless explicitly limited). 38 | -------------------------------------------------------------------------------- /tests/authentication/test_tempfiles.py: -------------------------------------------------------------------------------- 1 | import gc 2 | import os.path 3 | 4 | from kopf._cogs.clients.auth import _TempFiles 5 | 6 | 7 | def test_created(): 8 | tempfiles = _TempFiles() 9 | path = tempfiles[b'hello'] 10 | assert os.path.isfile(path) 11 | with open(path, 'rb') as f: 12 | assert f.read() == b'hello' 13 | 14 | 15 | def test_reused(): 16 | tempfiles = _TempFiles() 17 | path1 = tempfiles[b'hello'] 18 | path2 = tempfiles[b'hello'] 19 | assert path1 == path2 20 | 21 | 22 | def test_differs(): 23 | tempfiles = _TempFiles() 24 | path1 = tempfiles[b'hello'] 25 | path2 = tempfiles[b'world'] 26 | assert path1 != path2 27 | 28 | 29 | def test_purged(): 30 | tempfiles = _TempFiles() 31 | path1 = tempfiles[b'hello'] 32 | path2 = tempfiles[b'world'] 33 | assert os.path.isfile(path1) 34 | assert os.path.isfile(path2) 35 | 36 | tempfiles.purge() 37 | 38 | assert not os.path.isfile(path1) 39 | assert not os.path.isfile(path2) 40 | 41 | 42 | def test_garbage_collected(): 43 | tempfiles = _TempFiles() 44 | path1 = tempfiles[b'hello'] 45 | path2 = tempfiles[b'world'] 46 | assert os.path.isfile(path1) 47 | assert os.path.isfile(path2) 48 | 49 | del tempfiles 50 | gc.collect() 51 | gc.collect() 52 | gc.collect() 53 | 54 | assert not os.path.isfile(path1) 55 | assert not os.path.isfile(path2) 56 | -------------------------------------------------------------------------------- /docs/shutdown.rst: -------------------------------------------------------------------------------- 1 | ======== 2 | Shutdown 3 | ======== 4 | 5 | The cleanup handlers are executed when the operator exits 6 | either by a signal (e.g. 
SIGTERM) or by catching an exception, 7 | or by raising the stop-flag, or by cancelling the operator's task 8 | (for :doc:`embedded operators `):: 9 | 10 | import kopf 11 | 12 | @kopf.on.cleanup() 13 | async def cleanup_fn(logger, **kwargs): 14 | pass 15 | 16 | The cleanup handlers are not guaranteed to be fully executed if they take 17 | too long -- due to a limited graceful period or non-graceful termination. 18 | 19 | Similarly, the clean up handlers are not executed if the operator 20 | is force-killed with no possibility to react (e.g. by SIGKILL). 21 | 22 | .. note:: 23 | 24 | If the operator is running in a Kubernetes cluster, there can be 25 | timeouts set for graceful termination of a pod 26 | (``terminationGracePeriodSeconds``, the default is 30 seconds). 27 | 28 | If the cleanup takes longer than that in total (e.g. due to retries), 29 | the activity will not be finished in full, 30 | as the pod will be SIGKILL'ed by Kubernetes. 31 | 32 | Either design the cleanup activities to be as fast as possible, 33 | or configure ``terminationGracePeriodSeconds`` accordingly. 34 | 35 | Kopf itself does not set any implicit timeouts for the cleanup activity, 36 | and it can continue forever (unless explicitly limited). 
37 | -------------------------------------------------------------------------------- /peering.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: clusterkopfpeerings.kopf.dev 6 | spec: 7 | scope: Cluster 8 | group: kopf.dev 9 | names: 10 | kind: ClusterKopfPeering 11 | plural: clusterkopfpeerings 12 | singular: clusterkopfpeering 13 | versions: 14 | - name: v1 15 | served: true 16 | storage: true 17 | schema: 18 | openAPIV3Schema: 19 | type: object 20 | properties: 21 | status: 22 | type: object 23 | x-kubernetes-preserve-unknown-fields: true 24 | --- 25 | apiVersion: apiextensions.k8s.io/v1 26 | kind: CustomResourceDefinition 27 | metadata: 28 | name: kopfpeerings.kopf.dev 29 | spec: 30 | scope: Namespaced 31 | group: kopf.dev 32 | names: 33 | kind: KopfPeering 34 | plural: kopfpeerings 35 | singular: kopfpeering 36 | versions: 37 | - name: v1 38 | served: true 39 | storage: true 40 | schema: 41 | openAPIV3Schema: 42 | type: object 43 | properties: 44 | status: 45 | type: object 46 | x-kubernetes-preserve-unknown-fields: true 47 | --- 48 | apiVersion: kopf.dev/v1 49 | kind: ClusterKopfPeering 50 | metadata: 51 | name: default 52 | --- 53 | apiVersion: kopf.dev/v1 54 | kind: KopfPeering 55 | metadata: 56 | namespace: default 57 | name: default 58 | --- 59 | -------------------------------------------------------------------------------- /examples/01-minimal/README.md: -------------------------------------------------------------------------------- 1 | # Kopf minimal example 2 | 3 | The minimum codebase needed for to make a runnable Kubernetes operator. 4 | 5 | Start the operator: 6 | 7 | ```bash 8 | kopf run example.py --verbose 9 | ``` 10 | 11 | It does nothing useful, just notices the object creation, 12 | and prints the message to stdout -- can be seen in the operator's output. 
13 | 14 | In addition, the object's status is updated, as can be seen here: 15 | 16 | ```bash 17 | $ kubectl apply -f ../obj.yaml 18 | $ kubectl get kopfexamples 19 | NAME DURATION CHILDREN MESSAGE 20 | kopf-example-1 1m hello world 21 | ``` 22 | 23 | ```bash 24 | $ kubectl describe KopfExample kopf-example-1 25 | Name: kopf-example-1 26 | Namespace: default 27 | Labels: somelabel=somevalue 28 | ... 29 | Status: 30 | Message: hello world 31 | Events: 32 | Type Reason Age From Message 33 | ---- ------ ---- ---- ------- 34 | Normal Finished 42s kopf All handlers succeeded. 35 | Normal Success 43s kopf Handler create_fn succeeded. 36 | ``` 37 | 38 | ```bash 39 | $ kubectl get KopfExample kopf-example-1 -o yaml 40 | apiVersion: kopf.dev/v1 41 | kind: KopfExample 42 | metadata: 43 | ... 44 | spec: 45 | duration: 1m 46 | field: value 47 | items: 48 | - item1 49 | - item2 50 | status: 51 | message: hello world 52 | ``` 53 | 54 | Cleanup in the end: 55 | 56 | ```bash 57 | $ kubectl delete -f ../obj.yaml 58 | ``` 59 | -------------------------------------------------------------------------------- /tests/lifecycles/test_real_invocation.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import pytest 4 | 5 | import kopf 6 | from kopf._cogs.structs.bodies import Body 7 | from kopf._cogs.structs.ephemera import Memo 8 | from kopf._cogs.structs.patches import Patch 9 | from kopf._core.actions.progression import State 10 | from kopf._core.engines.indexing import OperatorIndexers 11 | from kopf._core.intents.causes import ChangingCause, Reason 12 | 13 | 14 | @pytest.mark.parametrize('lifecycle', [ 15 | kopf.lifecycles.all_at_once, 16 | kopf.lifecycles.one_by_one, 17 | kopf.lifecycles.randomized, 18 | kopf.lifecycles.shuffled, 19 | kopf.lifecycles.asap, 20 | ]) 21 | async def test_protocol_invocation(lifecycle, resource): 22 | """ 23 | To be sure that all kwargs are accepted properly. 
24 | Especially when the new kwargs are added or an invocation protocol changed. 25 | """ 26 | # The values are irrelevant, they can be anything. 27 | state = State.from_scratch() 28 | cause = ChangingCause( 29 | logger=logging.getLogger('kopf.test.fake.logger'), 30 | indices=OperatorIndexers().indices, 31 | resource=resource, 32 | patch=Patch(), 33 | memo=Memo(), 34 | body=Body({}), 35 | initial=False, 36 | reason=Reason.NOOP, 37 | ) 38 | handlers = [] 39 | selected = lifecycle(handlers, state=state, **cause.kwargs) 40 | assert isinstance(selected, (tuple, list)) 41 | assert len(selected) == 0 42 | -------------------------------------------------------------------------------- /tests/peering/test_keepalive.py: -------------------------------------------------------------------------------- 1 | from itertools import chain, repeat 2 | from unittest import mock 3 | 4 | import pytest 5 | 6 | from kopf._core.engines.peering import keepalive 7 | 8 | 9 | class StopInfiniteCycleException(Exception): 10 | pass 11 | 12 | 13 | async def test_background_task_runs(mocker, settings, namespaced_peering_resource): 14 | touch_mock = mocker.patch('kopf._core.engines.peering.touch') 15 | 16 | sleep_mock = mocker.patch('asyncio.sleep') 17 | # restore the default behavior after exhausting test values. 18 | # pytest-aiohttp calls asyncio.sleep during teardown, before the mock is removed. 
19 | sleep_mock.side_effect = chain([None, None, StopInfiniteCycleException], repeat(mock.DEFAULT)) 20 | 21 | randint_mock = mocker.patch('random.randint') 22 | randint_mock.side_effect = [7, 5, 9] 23 | 24 | settings.peering.lifetime = 33 25 | with pytest.raises(StopInfiniteCycleException): 26 | await keepalive(settings=settings, identity='id', 27 | resource=namespaced_peering_resource, namespace='namespace') 28 | 29 | assert randint_mock.call_count == 3 # only to be sure that we test the right thing 30 | assert sleep_mock.call_count == 3 31 | assert sleep_mock.call_args_list[0][0][0] == 33 - 7 32 | assert sleep_mock.call_args_list[1][0][0] == 33 - 5 33 | assert sleep_mock.call_args_list[2][0][0] == 33 - 9 34 | 35 | assert touch_mock.call_count == 4 # 3 updates + 1 clean-up 36 | -------------------------------------------------------------------------------- /tests/authentication/test_reauthentication.py: -------------------------------------------------------------------------------- 1 | import aiohttp.web 2 | 3 | from kopf._cogs.clients.auth import APIContext, authenticated 4 | from kopf._cogs.structs.credentials import ConnectionInfo 5 | 6 | 7 | @authenticated 8 | async def fn( 9 | x: int, 10 | *, 11 | context: APIContext | None, 12 | ) -> tuple[APIContext, int]: 13 | return context, x + 100 14 | 15 | 16 | async def test_session_is_injected( 17 | fake_vault, resp_mocker, aresponses, hostname, resource, namespace): 18 | 19 | result = {} 20 | get_mock = resp_mocker(return_value=aiohttp.web.json_response(result)) 21 | aresponses.add(hostname, resource.get_url(namespace=namespace, name='xyz'), 'get', get_mock) 22 | 23 | context, result = await fn(1) 24 | 25 | async with context.session: 26 | assert context is not None 27 | assert result == 101 28 | 29 | 30 | async def test_session_is_passed_through( 31 | fake_vault, resp_mocker, aresponses, hostname, resource, namespace): 32 | 33 | result = {} 34 | get_mock = 
resp_mocker(return_value=aiohttp.web.json_response(result)) 35 | aresponses.add(hostname, resource.get_url(namespace=namespace, name='xyz'), 'get', get_mock) 36 | 37 | explicit_context = APIContext(ConnectionInfo(server='http://irrelevant/')) 38 | context, result = await fn(1, context=explicit_context) 39 | 40 | async with context.session: 41 | assert context is explicit_context 42 | assert result == 101 43 | -------------------------------------------------------------------------------- /tests/registries/test_id_detection.py: -------------------------------------------------------------------------------- 1 | import functools 2 | 3 | from kopf._core.intents.registries import get_callable_id 4 | 5 | 6 | # Used in the tests. Must be global-scoped, or its qualname will be affected. 7 | def some_fn(): 8 | pass 9 | 10 | 11 | def test_id_of_simple_function(): 12 | fn_id = get_callable_id(some_fn) 13 | assert fn_id == 'some_fn' 14 | 15 | 16 | def test_id_of_single_partial(): 17 | partial_fn = functools.partial(some_fn) 18 | 19 | fn_id = get_callable_id(partial_fn) 20 | assert fn_id == 'some_fn' 21 | 22 | 23 | def test_id_of_double_partial(): 24 | partial1_fn = functools.partial(some_fn) 25 | partial2_fn = functools.partial(partial1_fn) 26 | 27 | fn_id = get_callable_id(partial2_fn) 28 | assert fn_id == 'some_fn' 29 | 30 | 31 | def test_id_of_single_wrapper(): 32 | 33 | @functools.wraps(some_fn) 34 | def wrapped_fn(): 35 | pass 36 | 37 | fn_id = get_callable_id(wrapped_fn) 38 | assert fn_id == 'some_fn' 39 | 40 | 41 | def test_id_of_double_wrapper(): 42 | 43 | @functools.wraps(some_fn) 44 | def wrapped1_fn(): 45 | pass 46 | 47 | @functools.wraps(wrapped1_fn) 48 | def wrapped2_fn(): 49 | pass 50 | 51 | fn_id = get_callable_id(wrapped2_fn) 52 | assert fn_id == 'some_fn' 53 | 54 | 55 | def test_id_of_lambda(): 56 | some_lambda = lambda: None 57 | 58 | fn_id = get_callable_id(some_lambda) 59 | assert fn_id.startswith(f'lambda:{__file__}:') 60 | 
-------------------------------------------------------------------------------- /examples/10-builtins/example.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | import kopf 4 | import pykube 5 | 6 | tasks: dict[str, dict[str, asyncio.Task]] = {} # dict{namespace: dict{name: asyncio.Task}} 7 | 8 | 9 | @kopf.on.resume('pods') 10 | @kopf.on.create('pods') 11 | async def pod_in_sight(namespace, name, logger, **kwargs): 12 | if namespace.startswith('kube-'): 13 | return 14 | else: 15 | task = asyncio.create_task(pod_killer(namespace, name, logger)) 16 | tasks.setdefault(namespace, {}) 17 | tasks[namespace][name] = task 18 | 19 | 20 | @kopf.on.delete('pods') 21 | async def pod_deleted(namespace, name, **kwargs): 22 | if namespace in tasks and name in tasks[namespace]: 23 | task = tasks[namespace][name] 24 | task.cancel() # it will also remove from `tasks` 25 | 26 | 27 | async def pod_killer(namespace, name, logger, timeout=30): 28 | try: 29 | logger.info(f"=== Pod killing happens in {timeout}s.") 30 | await asyncio.sleep(timeout) 31 | logger.info(f"=== Pod killing happens NOW!") 32 | 33 | api = pykube.HTTPClient(pykube.KubeConfig.from_env()) 34 | pod = pykube.Pod.objects(api, namespace=namespace).get_by_name(name) 35 | pod.delete() 36 | api.session.close() 37 | 38 | except asyncio.CancelledError: 39 | logger.info(f"=== Pod killing is cancelled!") 40 | 41 | finally: 42 | if namespace in tasks and name in tasks[namespace]: 43 | del tasks[namespace][name] 44 | -------------------------------------------------------------------------------- /tests/persistence/test_outcomes.py: -------------------------------------------------------------------------------- 1 | from kopf._core.actions.execution import Outcome, Result 2 | 3 | 4 | def test_creation_for_ignored_handlers(): 5 | outcome = Outcome(final=True) 6 | assert outcome.final 7 | assert outcome.delay is None 8 | assert outcome.result is None 9 | assert 
outcome.exception is None 10 | assert not outcome.subrefs 11 | 12 | 13 | def test_creation_for_results(): 14 | result = Result(object()) 15 | outcome = Outcome(final=True, result=result) 16 | assert outcome.final 17 | assert outcome.delay is None 18 | assert outcome.result is result 19 | assert outcome.exception is None 20 | assert not outcome.subrefs 21 | 22 | 23 | def test_creation_for_permanent_errors(): 24 | error = Exception() 25 | outcome = Outcome(final=True, exception=error) 26 | assert outcome.final 27 | assert outcome.delay is None 28 | assert outcome.result is None 29 | assert outcome.exception is error 30 | assert not outcome.subrefs 31 | 32 | 33 | def test_creation_for_temporary_errors(): 34 | error = Exception() 35 | outcome = Outcome(final=False, exception=error, delay=123) 36 | assert not outcome.final 37 | assert outcome.delay == 123 38 | assert outcome.result is None 39 | assert outcome.exception is error 40 | assert not outcome.subrefs 41 | 42 | 43 | def test_creation_with_subrefs(): 44 | outcome = Outcome(final=True, subrefs=['sub1', 'sub2']) 45 | assert outcome.subrefs == ['sub1', 'sub2'] 46 | -------------------------------------------------------------------------------- /examples/crd.yaml: -------------------------------------------------------------------------------- 1 | # A demo CRD for the Kopf example operators. 
2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: kopfexamples.kopf.dev 6 | spec: 7 | scope: Namespaced 8 | group: kopf.dev 9 | names: 10 | kind: KopfExample 11 | plural: kopfexamples 12 | singular: kopfexample 13 | shortNames: 14 | - kopfexes 15 | - kopfex 16 | - kexes 17 | - kex 18 | versions: 19 | - name: v1 20 | served: true 21 | storage: true 22 | subresources: { status: { } } # comment/uncomment for experiments 23 | schema: 24 | openAPIV3Schema: 25 | type: object 26 | properties: 27 | spec: 28 | type: object 29 | x-kubernetes-preserve-unknown-fields: true 30 | status: 31 | type: object 32 | x-kubernetes-preserve-unknown-fields: true 33 | additionalPrinterColumns: 34 | - name: Duration 35 | type: string 36 | priority: 0 37 | jsonPath: .spec.duration 38 | description: For how long the pod should sleep. 39 | - name: Children 40 | type: string 41 | priority: 0 42 | jsonPath: .status.create_fn.children 43 | description: The children pods created. 44 | - name: Message 45 | type: string 46 | priority: 0 47 | jsonPath: .status.create_fn.message 48 | description: As returned from the handler (sometimes). 
import pytest

from kopf._cogs.structs.references import EVERYTHING, Selector


@pytest.mark.parametrize('kw', ['kind', 'plural', 'singular', 'shortcut', 'category', 'any_name'])
def test_repr_with_names(kw):
    selector = Selector(**{kw: 'name1'})
    selector_repr = repr(selector)
    assert selector_repr == f"Selector({kw}='name1')"


@pytest.mark.parametrize('group', ['group1', 'group1.example.com', 'v1nonconventional'])
def test_repr_with_group(group):
    selector = Selector(group=group, any_name='name1')
    selector_repr = repr(selector)
    assert selector_repr == f"Selector(group='{group}', any_name='name1')"


@pytest.mark.parametrize('kw', ['kind', 'plural', 'singular', 'shortcut', 'any_name'])
def test_is_specific_with_names(kw):
    selector = Selector(**{kw: 'name1'})
    assert selector.is_specific


@pytest.mark.parametrize('kw', ['category'])
def test_is_not_specific_with_categories(kw):
    selector = Selector(**{kw: 'name1'})
    assert not selector.is_specific


# BUGFIX: three tests used to share the name `test_is_not_specific_with_categories`,
# so pytest only collected the last definition and the others never ran.
# The EVERYTHING case is renamed (and its unused `kw` parametrization dropped);
# the exact byte-for-byte duplicate of the categories test is removed.
def test_is_not_specific_with_everything():
    selector = Selector(EVERYTHING)
    assert not selector.is_specific
label: Problem
description: >
  What problem do you currently face with no solution in sight?
  If possible, explain what other ways you have already tried to solve it.
import pprint

import kopf


@kopf.index('pods')
def is_running(namespace, name, status, **_):
    """Index pods by (namespace, name) to whether they are in the Running phase."""
    return {(namespace, name): status.get('phase') == 'Running'}
    # {('kube-system', 'traefik-...-...'): [True],
    #  ('kube-system', 'helm-install-traefik-...'): [False],
    #  ...}


@kopf.index('pods')
def by_label(labels, name, **_):
    """Index pod names by every (label, value) pair they carry."""
    return {(label, value): name for label, value in labels.items()}
    # {('app', 'traefik'): ['traefik-...-...'],
    #  ('job-name', 'helm-install-traefik'): ['helm-install-traefik-...'],
    #  ('helmcharts.helm.cattle.io/chart', 'traefik'): ['helm-install-traefik-...'],
    #  ...}


@kopf.on.probe()  # type: ignore
def pod_count(is_running: kopf.Index, **_):
    return len(is_running)


@kopf.on.probe()  # type: ignore
def pod_names(is_running: kopf.Index, **_):
    return [name for _, name in is_running]


@kopf.timer('kex', interval=5)  # type: ignore
def intervalled(is_running: kopf.Index, by_label: kopf.Index, patch: kopf.Patch, **_):
    pprint.pprint(dict(by_label))
    # FIX: the comprehension variable is named `running` (was `is_running`),
    # to avoid shadowing the index kwarg of the same name.
    patch.status['running-pods'] = [
        f"{ns}::{name}"
        for (ns, name), running in is_running.items()
        if ns in ['kube-system', 'default']
        if running
    ]


# Marks for the e2e tests (see tests/e2e/test_examples.py):
# We do not care: pods can have 6-10 updates here.
The cluster-scoped call is used in two cases:

* The resource itself is cluster-scoped, and namespacing makes no sense.
* The operator serves all namespaces for the namespaced custom resource.

Otherwise, the namespace-scoped call is used:

* The resource is namespace-scoped AND the operator is namespace-restricted.
27 | """ 28 | rsp = await api.get( 29 | url=resource.get_url(namespace=namespace), 30 | logger=logger, 31 | settings=settings, 32 | ) 33 | 34 | items: list[bodies.RawBody] = [] 35 | resource_version = rsp.get('metadata', {}).get('resourceVersion', None) 36 | for item in rsp.get('items', []): 37 | if 'kind' in rsp: 38 | item.setdefault('kind', rsp['kind'].removesuffix('List')) 39 | if 'apiVersion' in rsp: 40 | item.setdefault('apiVersion', rsp['apiVersion']) 41 | items.append(item) 42 | 43 | return items, resource_version 44 | -------------------------------------------------------------------------------- /kopf/_cogs/aiokits/aiotime.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import collections.abc 3 | from collections.abc import Collection 4 | 5 | 6 | async def sleep( 7 | delays: float | Collection[float | None] | None, 8 | wakeup: asyncio.Event | None = None, 9 | ) -> float | None: 10 | """ 11 | Measure the sleep time: either until the timeout, or until the event is set. 12 | 13 | Returns the number of seconds left to sleep, or ``None`` if the sleep was 14 | not interrupted and reached its specified delay (an equivalent of ``0``). 15 | In theory, the result can be ``0`` if the sleep was interrupted precisely 16 | the last moment before timing out; this is unlikely to happen though. 17 | """ 18 | passed_delays = delays if isinstance(delays, collections.abc.Collection) else [delays] 19 | actual_delays = [delay for delay in passed_delays if delay is not None] 20 | minimal_delay = min(actual_delays) if actual_delays else 0 21 | 22 | # Do not go for the real low-level system sleep if there is no need to sleep. 
23 | if minimal_delay <= 0: 24 | return None 25 | 26 | awakening_event = wakeup if wakeup is not None else asyncio.Event() 27 | loop = asyncio.get_running_loop() 28 | try: 29 | start_time = loop.time() 30 | await asyncio.wait_for(awakening_event.wait(), timeout=minimal_delay) 31 | except asyncio.TimeoutError: 32 | return None # interruptable sleep is over: uninterrupted. 33 | else: 34 | end_time = loop.time() 35 | duration = end_time - start_time 36 | return max(0., minimal_delay - duration) 37 | -------------------------------------------------------------------------------- /docs/vision.rst: -------------------------------------------------------------------------------- 1 | ====== 2 | Vision 3 | ====== 4 | 5 | Kubernetes `has become a standard de facto`__ for the enterprise infrastructure 6 | management, especially for microservice-based infrastructures. 7 | 8 | __ https://www.google.com/search?q=kubernetes+standard+de+facto&oq=kuerbenetes+standard+de+facto 9 | 10 | Kubernetes operators have become a common way to extend Kubernetes 11 | with domain objects and domain logic. 12 | 13 | At the moment (2018-2019), operators are mostly written in Go 14 | and require advanced knowledge both of Go and Kubernetes internals. 15 | This raises the entry barrier to the operator development field. 16 | 17 | In a perfect world of Kopf, Kubernetes operators are a commodity, 18 | used to build the domain logic on top of Kubernetes fast and with ease, 19 | requiring little or no skills in infrastructure management. 20 | 21 | For this, Kopf hides the low-level infrastructure details from the user 22 | (i.e. the operator developer), 23 | exposing only the APIs and DSLs needed to express the user's domain. 24 | 25 | Besides, Kopf does this in one of the widely used, easy to learn 26 | programming languages: Python. 
27 | 28 | But Kopf does not go too far in abstracting the Kubernetes internals away: 29 | it avoids the introduction of extra entities and controlling structures 30 | (`Occam's Razor`_, `KISS`_), and most likely it will never have 31 | a mapping of Python classes to Kubernetes resources 32 | (like in the ORMs for the relational databases). 33 | 34 | .. _Occam's Razor: https://en.wikipedia.org/wiki/Occam%27s_razor 35 | .. _KISS: https://en.wikipedia.org/wiki/KISS_principle 36 | -------------------------------------------------------------------------------- /docs/tips-and-tricks.rst: -------------------------------------------------------------------------------- 1 | ============= 2 | Tips & Tricks 3 | ============= 4 | 5 | 6 | .. _never-again-filters: 7 | 8 | Excluding handlers forever 9 | ========================== 10 | 11 | Both successful executions and permanent errors of change-detecting handlers 12 | only exclude these handlers from the current handling cycle, which is scoped 13 | to the current change-set (i.e. one diff of an object). 14 | On the next change, the handlers will be invoked again, regardless of their 15 | previous permanent error. 16 | 17 | The same is valid for the daemons: they will be spawned on the next operator 18 | restart (assuming that one operator process is one handling cycle for daemons). 19 | 20 | To prevent handlers or daemons from being invoked for a specific resource 21 | ever again, even after the operator restarts, use annotations and filters 22 | (or the same for labels or arbitrary fields with ``when=`` callback filtering): 23 | 24 | .. 
Such a never-again exclusion might be implemented as a built-in feature of
Kopf one day, but for now it must be done explicitly, as shown above.
Calling a synchronous function (e.g. HTTP API calls or ``time.sleep()``)
inside of an asynchronous function will block the whole operator process
until the synchronous call is finished — i.e. even other resources
processed in parallel, and the Kubernetes event-watching/-queueing cycles.
import asyncio
from collections.abc import AsyncIterator, Collection
from typing import Generic, TypeVar

_T = TypeVar('_T')


class Container(Generic[_T]):
    """A one-slot value holder that notifies all waiters on every change."""

    def __init__(self) -> None:
        super().__init__()
        self.changed = asyncio.Condition()
        self._values: Collection[_T] = []  # either empty, or a single item

    def get_nowait(self) -> _T:  # used mostly in testing
        """Return the stored value immediately, or raise if none is stored."""
        for value in self._values:
            return value
        raise LookupError("No value is stored in the container.")

    async def set(self, value: _T) -> None:
        """Store the value and wake up all the waiters."""
        async with self.changed:
            self._values = [value]
            self.changed.notify_all()

    async def wait(self) -> _T:
        """Block until any value is present, then return it."""
        async with self.changed:
            await self.changed.wait_for(lambda: self._values)
            for value in self._values:
                return value
            # Unreachable: the predicate above guarantees a stored value.
            raise LookupError("No value is stored in the container.")

    async def reset(self) -> None:
        """Drop the stored value (if any) and wake up all the waiters."""
        async with self.changed:
            self._values = []
            self.changed.notify_all()

    async def as_changed(self) -> AsyncIterator[_T]:
        """Yield the current value (when present) on every change, forever."""
        async with self.changed:
            while True:
                for value in self._values:
                    yield value
                await self.changed.wait()
# Tests for timer triggering: a timer must fire at least once per cycle,
# and must obey the configured initial delay before the first invocation.
import logging

import kopf


async def test_timer_is_spawned_at_least_once(
        resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle):
    caplog.set_level(logging.DEBUG)

    @kopf.timer(*resource, id='fn', interval=1.0)
    async def fn(**kwargs):
        dummy.mock()
        dummy.kwargs = kwargs
        dummy.steps['called'].set()
        # NOTE(review): `_setter` looks like a framework-internal attribute of
        # the stopper flag; setting it makes the timer loop exit after one call.
        kwargs['stopped']._setter.set()  # to exit the cycle

    await simulate_cycle({})
    await dummy.steps['called'].wait()

    # Exactly one invocation on the first try (retry == 0), and exactly one
    # sleep — for the regular interval of 1.0s.
    assert dummy.mock.call_count == 1
    assert dummy.kwargs['retry'] == 0
    assert k8s_mocked.sleep.call_count == 1
    assert k8s_mocked.sleep.call_args_list[0][0][0] == 1.0

    await dummy.wait_for_daemon_done()


async def test_timer_initial_delay_obeyed(
        resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle):
    caplog.set_level(logging.DEBUG)

    @kopf.timer(*resource, id='fn', initial_delay=5.0, interval=1.0)
    async def fn(**kwargs):
        dummy.mock()
        dummy.kwargs = kwargs
        dummy.steps['called'].set()
        kwargs['stopped']._setter.set()  # to exit the cycle

    await simulate_cycle({})
    await dummy.steps['called'].wait()

    # Two sleeps this time: first the initial delay (5.0s), then the regular
    # interval (1.0s) — in exactly that order.
    assert dummy.mock.call_count == 1
    assert dummy.kwargs['retry'] == 0
    assert k8s_mocked.sleep.call_count == 2
    assert k8s_mocked.sleep.call_args_list[0][0][0] == 5.0
    assert k8s_mocked.sleep.call_args_list[1][0][0] == 1.0

    await dummy.wait_for_daemon_done()
async def test_ngrok_tunnel(
        certfile, pkeyfile, responder, pyngrok_mock):
    """Verify that the ngrok tunnel is configured & connected as requested."""

    responder.fut.set_result({'hello': 'world'})
    server = WebhookNgrokTunnel(port=54321, path='/p1/p2',
                                region='xx', token='xyz', binary='/bin/ngrok')
    async with server:
        async for client_config in server(responder.fn):
            assert 'caBundle' not in client_config  # trust the default CA
            assert client_config['url'] == 'https://nowhere/p1/p2'
            break  # do not sleep

    # All the constructor arguments must be forwarded to pyngrok.
    assert pyngrok_mock.conf.get_default.called
    assert pyngrok_mock.conf.get_default.return_value.ngrok_path == '/bin/ngrok'
    assert pyngrok_mock.conf.get_default.return_value.region == 'xx'
    assert pyngrok_mock.ngrok.set_auth_token.called
    assert pyngrok_mock.ngrok.set_auth_token.call_args_list[0][0][0] == 'xyz'
    assert pyngrok_mock.ngrok.connect.called
    assert pyngrok_mock.ngrok.connect.call_args_list[0][0][0] == '54321'
    # FIX: `is True` instead of `== True` (flake8 E712) — asserts that the
    # exact boolean is passed through, not just any truthy value.
    assert pyngrok_mock.ngrok.connect.call_args_list[0][1]['bind_tls'] is True
    assert pyngrok_mock.ngrok.disconnect.called

    await asyncio.get_running_loop().shutdown_asyncgens()
"""
All the functions to manipulate the object finalization and deletion.

Finalizers are used to block the actual deletion until the finalizers
are removed, meaning that the operator has done all its duties
to "release" the object (e.g. cleanups; delete-handlers in our case).
"""
from kopf._cogs.structs import bodies, patches


def is_deletion_ongoing(
        body: bodies.Body,
) -> bool:
    """Check if Kubernetes has already marked the object for deletion."""
    timestamp = body.get('metadata', {}).get('deletionTimestamp')
    return timestamp is not None


def is_deletion_blocked(
        body: bodies.Body,
        finalizer: str,
) -> bool:
    """Check if our finalizer is among the object's finalizers."""
    return finalizer in body.get('metadata', {}).get('finalizers', [])


def block_deletion(
        *,
        body: bodies.Body,
        patch: patches.Patch,
        finalizer: str,
) -> None:
    """Add our finalizer via the patch, unless it is already present."""
    if is_deletion_blocked(body=body, finalizer=finalizer):
        return  # already blocked: nothing to do.
    current = body.get('metadata', {}).get('finalizers', [])
    # Start the patched list from a copy of the current finalizers.
    patched = patch.setdefault('metadata', {}).setdefault('finalizers', list(current))
    patched.append(finalizer)


def allow_deletion(
        *,
        body: bodies.Body,
        patch: patches.Patch,
        finalizer: str,
) -> None:
    """Remove our finalizer via the patch, if it is present."""
    if not is_deletion_blocked(body=body, finalizer=finalizer):
        return  # not blocked by us: nothing to do.
    current = body.get('metadata', {}).get('finalizers', [])
    patched = patch.setdefault('metadata', {}).setdefault('finalizers', list(current))
    if finalizer in patched:
        patched.remove(finalizer)
@pytest.fixture(params=examples, ids=[os.path.basename(path.rstrip('/')) for path in examples])
def exampledir(request):
    """A path to one example's directory (parametrized over all examples)."""
    return pathlib.Path(request.param)


@pytest.fixture()
def with_crd():
    """Ensure the example CRD exists in the cluster before the test runs."""
    # FIX: removed the pointless f-prefixes (no placeholders) and the
    # misplaced comment about guessing the Kubernetes version.
    subprocess.run("kubectl apply -f examples/crd.yaml",
                   shell=True, check=True, timeout=10, capture_output=True)


@pytest.fixture()
def with_peering():
    """Ensure the peering object exists in the cluster before the test runs."""
    subprocess.run("kubectl apply -f peering.yaml",
                   shell=True, check=True, timeout=10, capture_output=True)


@pytest.fixture()
def no_crd():
    """Remove the example CRD, so the tests run against its absence."""
    subprocess.run("kubectl delete customresourcedefinition kopfexamples.kopf.dev",
                   shell=True, check=True, timeout=10, capture_output=True)


@pytest.fixture()
def no_peering():
    """Remove the peering CRD, so the tests run against its absence."""
    subprocess.run("kubectl delete customresourcedefinition kopfpeerings.kopf.dev",
                   shell=True, check=True, timeout=10, capture_output=True)
Check the list of open [issues](../../issues).
Either assign an existing issue to yourself, or create a new one
that you would like to work on, and discuss your ideas and use cases there.
"""
Module- and file-loading to trigger the handlers to be registered.

Since the framework is based on the decorators to register the handlers,
the files/modules with these handlers should be loaded first,
thus executing the decorators.

The files/modules to be loaded are usually specified on the command-line.
Currently, two loading modes are supported, both are equivalent to Python CLI:

* Plain files (`kopf run file.py`).
* Importable modules (`kopf run -m pkg.mod`).

Multiple files/modules can be specified. They will be loaded in the given order.
"""

import importlib
import importlib.abc
import importlib.util
import os.path
import sys
from collections.abc import Iterable
from typing import cast


def preload(
        paths: Iterable[str],
        modules: Iterable[str],
) -> None:
    """
    Ensure the handlers are registered by loading/importing the files/modules.
    """

    for idx, path in enumerate(paths):
        # Make the file's sibling modules importable, like Python CLI does for scripts.
        sys.path.insert(0, os.path.abspath(os.path.dirname(path)))
        name = f'__kopf_script_{idx}__{path}'  # same pseudo-name as '__main__'
        spec = importlib.util.spec_from_file_location(name, path)
        module = importlib.util.module_from_spec(spec) if spec is not None else None
        loader = cast(importlib.abc.Loader, spec.loader) if spec is not None else None
        if module is not None and loader is not None:
            sys.modules[name] = module
            loader.exec_module(module)  # executing the file runs the decorators
        else:
            raise ImportError(f"Failed loading {path}: no module or loader.")

    for name in modules:
        importlib.import_module(name)  # importing the module runs the decorators
# Async daemons do not need the `stopped` signal, they can rely on `asyncio.CancelledError` raised.
# This daemon runs forever (until stopped, i.e. cancelled). Yet fails to start for 3 first times.
# It only runs for resources whose annotations match the filter below.
@kopf.daemon('kopfexamples', backoff=3, cancellation_backoff=1.0, cancellation_timeout=0.5,
             annotations={'someannotation': 'somevalue'})
async def background_async(spec, logger, retry, **_):
    # `retry` counts the previous failed attempts; raise a temporary error
    # for the first 3 attempts to demonstrate daemon restarting.
    # NOTE(review): `backoff=3` presumably configures the restart delay for
    # arbitrary errors — confirm against the framework docs; the simulated
    # failures here use an explicit delay=1 instead.
    if retry < 3:
        raise kopf.TemporaryError("Simulated failure.", delay=1)

    # Ping every 5 seconds until the task is cancelled (resource deletion,
    # filter mismatch, or operator exit).
    while True:
        logger.info(f"=> Ping from an async daemon: field={spec['field']!r}")
        await asyncio.sleep(5.0)
# A second sample operator script: registers an update handler, so that the
# CLI preloading tests can distinguish it from SCRIPT1 (a creation handler).
# NOTE: the printed message previously said 'create_fn' — a copy-paste slip;
# it now correctly names 'update_fn'.
SCRIPT2 = """
import kopf

@kopf.on.update('kopfexamples')
def update_fn(spec, **_):
    print('Hello from update_fn!')
    print(repr(spec))
"""
tmpdir.join('handler1.py').write(SCRIPT1) 31 | tmpdir.join('handler2.py').write(SCRIPT2) 32 | pkgdir = tmpdir.mkdir('package') 33 | pkgdir.join('__init__.py').write('') 34 | pkgdir.join('module_1.py').write(SCRIPT1) 35 | pkgdir.join('module_2.py').write(SCRIPT2) 36 | 37 | sys.path.insert(0, str(tmpdir)) 38 | try: 39 | with tmpdir.as_cwd(): 40 | yield tmpdir 41 | finally: 42 | sys.path.remove(str(tmpdir)) 43 | 44 | 45 | @pytest.fixture(autouse=True) 46 | def clean_modules_cache(): 47 | # Otherwise, the first loaded test-modules remain there forever, 48 | # preventing 2nd and further tests from passing. 49 | for key in list(sys.modules.keys()): 50 | if key.startswith('package'): 51 | del sys.modules[key] 52 | 53 | 54 | @pytest.fixture() 55 | def runner(): 56 | runner = click.testing.CliRunner() 57 | return runner 58 | 59 | 60 | @pytest.fixture() 61 | def invoke(runner): 62 | return functools.partial(runner.invoke, main) 63 | 64 | 65 | @pytest.fixture() 66 | def preload(mocker): 67 | return mocker.patch('kopf._cogs.helpers.loaders.preload') 68 | 69 | 70 | @pytest.fixture() 71 | def real_run(mocker): 72 | return mocker.patch('kopf._core.reactor.running.run') 73 | -------------------------------------------------------------------------------- /examples/10-builtins/test_example_10.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | import time 3 | 4 | import kopf.testing 5 | 6 | 7 | def test_pods_reacted(): 8 | 9 | example_py = os.path.join(os.path.dirname(__file__), 'example.py') 10 | with kopf.testing.KopfRunner( 11 | ['run', '--all-namespaces', '--standalone', '--verbose', example_py], 12 | timeout=60, 13 | ) as runner: 14 | name = _create_pod() 15 | time.sleep(5) # give it some time to react 16 | _delete_pod(name) 17 | time.sleep(1) # give it some time to react 18 | 19 | assert runner.exception is None 20 | assert runner.exit_code == 0 21 | 22 | assert f'[default/{name}] Creation is in progress:' in 
def _create_pod():
    """Create a short-lived busybox pod in the 'default' namespace; return its generated name."""
    # Imported inside the function — presumably so that collecting this test
    # file does not require pykube to be installed; confirm with the e2e setup.
    import pykube
    api = pykube.HTTPClient(pykube.KubeConfig.from_file())
    with api.session:
        pod = pykube.Pod(api, {
            'apiVersion': 'v1',
            'kind': 'Pod',
            'metadata': {'generateName': 'kopf-pod-', 'namespace': 'default'},
            'spec': {
                'containers': [{
                    'name': 'the-only-one',
                    'image': 'busybox',
                    # The container exits after ~1s on its own; the operator's
                    # 30s "pod killing" timer is expected to be cancelled.
                    'command': ["sh", "-x", "-c", "sleep 1"],
                }]},
        })
        pod.create()
        return pod.name
async def test_memo_is_shallow_copied():
    # recall() must copy the operator-wide memo base into the per-resource
    # memory via the copying protocol (``__copy__``), not share the object.

    class MyMemo(Memo):
        def __copy__(self):
            mock()  # record that the copying protocol was actually invoked
            return MyMemo()

    mock = Mock()
    memobase = MyMemo()
    memories = ResourceMemories()
    memory = await memories.recall(BODY, memobase=memobase)
    assert mock.call_count == 1
    assert memory.memo is not memobase
toctree:: 45 | :maxdepth: 2 46 | :caption: Operator handling: 47 | 48 | startup 49 | shutdown 50 | probing 51 | authentication 52 | configuration 53 | peering 54 | cli 55 | 56 | .. toctree:: 57 | :maxdepth: 2 58 | :caption: Toolkits: 59 | 60 | events 61 | hierarchies 62 | testing 63 | embedding 64 | 65 | .. toctree:: 66 | :maxdepth: 2 67 | :caption: Recipes: 68 | 69 | deployment 70 | continuity 71 | idempotence 72 | reconciliation 73 | tips-and-tricks 74 | troubleshooting 75 | 76 | .. toctree:: 77 | :maxdepth: 2 78 | :caption: Developer Manual: 79 | 80 | minikube 81 | contributing 82 | architecture 83 | packages/kopf 84 | 85 | .. toctree:: 86 | :maxdepth: 2 87 | :caption: About Kopf: 88 | 89 | vision 90 | naming 91 | alternatives 92 | 93 | 94 | Indices and tables 95 | ================== 96 | 97 | * :ref:`genindex` 98 | * :ref:`modindex` 99 | * :ref:`search` 100 | -------------------------------------------------------------------------------- /tests/admission/test_certificates.py: -------------------------------------------------------------------------------- 1 | import certvalidator 2 | import pytest 3 | 4 | from kopf._kits.webhooks import WebhookServer 5 | 6 | 7 | def test_missing_oscrypto(no_oscrypto): 8 | with pytest.raises(ImportError) as err: 9 | WebhookServer.build_certificate(['...']) 10 | assert "pip install certbuilder" in str(err.value) 11 | 12 | 13 | def test_missing_certbuilder(no_certbuilder): 14 | with pytest.raises(ImportError) as err: 15 | WebhookServer.build_certificate(['...']) 16 | assert "pip install certbuilder" in str(err.value) 17 | 18 | 19 | def test_certificate_generation(): 20 | names = ['hostname1', 'hostname2', '1.2.3.4', '0:0:0:0:0:0:0:1'] 21 | cert, pkey = WebhookServer.build_certificate(names) 22 | context = certvalidator.ValidationContext(extra_trust_roots=[cert]) 23 | validator = certvalidator.CertificateValidator(cert, validation_context=context) 24 | path = validator.validate_tls('hostname1') 25 | assert len(path) == 1 # 
self-signed 26 | assert path.first.ca 27 | assert path.first.self_issued 28 | assert set(path.first.valid_domains) == {'hostname1', 'hostname2', '1.2.3.4', '::1'} 29 | assert set(path.first.valid_ips) == {'1.2.3.4', '::1'} 30 | 31 | 32 | @pytest.mark.parametrize('hostnames, common_name', [ 33 | (['hostname1', 'hostname2'], 'hostname1'), 34 | (['hostname2', 'hostname1'], 'hostname2'), 35 | (['1.2.3.4', 'hostname1'], 'hostname1'), 36 | (['1.2.3.4', '2.3.4.5'], '1.2.3.4'), 37 | ]) 38 | def test_common_name_selection(hostnames, common_name): 39 | cert, pkey = WebhookServer.build_certificate(hostnames) 40 | context = certvalidator.ValidationContext(extra_trust_roots=[cert]) 41 | validator = certvalidator.CertificateValidator(cert, validation_context=context) 42 | path = validator.validate_tls(common_name) 43 | assert path.first.subject.native['common_name'] == common_name 44 | -------------------------------------------------------------------------------- /tests/settings/test_executor.py: -------------------------------------------------------------------------------- 1 | import concurrent.futures 2 | import threading 3 | from unittest.mock import MagicMock 4 | 5 | import kopf 6 | from kopf._core.actions.invocation import invoke 7 | 8 | 9 | class CatchyExecutor(concurrent.futures.ThreadPoolExecutor): 10 | 11 | def __init__(self, *args, **kwargs): 12 | super().__init__(*args, **kwargs) 13 | self.calls = [] 14 | 15 | def submit(self, fn, *args, **kwargs): 16 | self.calls.append(fn) 17 | return super().submit(fn, *args, **kwargs) 18 | 19 | 20 | async def test_synchronous_calls_are_threaded(): 21 | settings = kopf.OperatorSettings() 22 | thread = None 23 | 24 | def fn(): 25 | nonlocal thread 26 | thread = threading.current_thread() 27 | 28 | mock = MagicMock(wraps=fn) 29 | await invoke(fn=mock, settings=settings) 30 | 31 | assert mock.called 32 | assert thread is not None # remembered from inside fn() 33 | assert thread is not threading.current_thread() # not in the main 
thread 34 | 35 | 36 | async def test_synchronous_calls_use_replaced_executor(): 37 | settings = kopf.OperatorSettings() 38 | executor = CatchyExecutor() 39 | settings.execution.executor = executor 40 | 41 | mock = MagicMock() 42 | await invoke(fn=mock, settings=settings) 43 | 44 | assert mock.called 45 | assert len(executor.calls) == 1 46 | 47 | 48 | async def test_synchronous_executor_limit_is_applied(): 49 | settings = kopf.OperatorSettings() 50 | assert hasattr(settings.execution.executor, '_max_workers') # prerequisite 51 | 52 | assert settings.execution.max_workers is None # as in "unset by us, assume defaults" 53 | assert settings.execution.executor._max_workers is not None # usually CPU count * N. 54 | 55 | settings.execution.max_workers = 123456 56 | 57 | assert settings.execution.max_workers == 123456 58 | assert settings.execution.executor._max_workers == 123456 59 | -------------------------------------------------------------------------------- /examples/12-embedded/README.md: -------------------------------------------------------------------------------- 1 | # Kopf example for embedded operator 2 | 3 | Kopf operators can be embedded into arbitrary applications, such as UI; 4 | or they can be orchestrated explicitly by the developers instead of `kopf run`. 5 | 6 | In this example, we start the operator in a side thread, while simulating 7 | an application activity in the main thread. In this case, the "application" 8 | just creates and deletes the example objects, but it can be any activity. 9 | 10 | Start the operator: 11 | 12 | ```bash 13 | python example.py 14 | ``` 15 | 16 | Let it run for 6 seconds (mostly due to sleeps: 3 times by 1+1 second). 17 | Here is what it will print (shortened; the actual output is more verbose): 18 | 19 | ``` 20 | Starting the main app. 21 | 22 | [DEBUG ] Pykube is configured via kubeconfig file. 23 | [DEBUG ] Client is configured via kubeconfig file. 
24 | [WARNING ] Default peering object is not found, falling back to the standalone mode. 25 | [WARNING ] OS signals are ignored: running not in the main thread. 26 | 27 | Do the main app activity here. Step 1/3. 28 | 29 | [DEBUG ] [default/kopf-example-0] Creation is in progress: ... 30 | [DEBUG ] [default/kopf-example-0] Deletion is in progress: ... 31 | 32 | Do the main app activity here. Step 2/3. 33 | 34 | [DEBUG ] [default/kopf-example-1] Creation is in progress: ... 35 | [DEBUG ] [default/kopf-example-1] Deletion is in progress: ... 36 | 37 | Do the main app activity here. Step 3/3. 38 | 39 | [DEBUG ] [default/kopf-example-2] Creation is in progress: ... 40 | [DEBUG ] [default/kopf-example-2] Deletion is in progress: ... 41 | 42 | Exiting the main app. 43 | 44 | [INFO ] Stop-flag is set to True. Operator is stopping. 45 | [DEBUG ] Root task 'poster of events' is cancelled. 46 | [DEBUG ] Root task 'watcher of kopfexamples.kopf.dev' is cancelled. 47 | [DEBUG ] Root tasks are stopped: finished normally; tasks left: set() 48 | [DEBUG ] Hung tasks stopping is skipped: no tasks given. 49 | ``` 50 | -------------------------------------------------------------------------------- /tests/k8s/test_watching_bookmarks.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import json 3 | 4 | import aiohttp.web 5 | 6 | from kopf._cogs.clients.watching import Bookmark, continuous_watch 7 | 8 | 9 | async def test_listed_is_inbetween( 10 | settings, resource, namespace, hostname, aresponses): 11 | 12 | # Resource version is used as a continutation for the watch-queries. 13 | list_data = {'metadata': {'resourceVersion': '123'}, 'items': [ 14 | {'spec': 'a'}, 15 | {'spec': 'b'}, 16 | ]} 17 | list_resp = aiohttp.web.json_response(list_data) 18 | list_url = resource.get_url(namespace=namespace) 19 | 20 | # The same as in the `stream` fixture. But here, we also mock lists. 
21 | stream_data = [ 22 | {'type': 'ADDED', 'object': {'spec': 'c'}}, # stream.feed() 23 | {'type': 'ADDED', 'object': {'spec': 'd'}}, # stream.feed() 24 | {'type': 'ERROR', 'object': {'code': 410}}, # stream.close() 25 | ] 26 | stream_text = '\n'.join(json.dumps(event) for event in stream_data) 27 | stream_resp = aresponses.Response(text=stream_text) 28 | stream_query = {'watch': 'true', 'resourceVersion': '123'} 29 | stream_url = resource.get_url(namespace=namespace, params=stream_query) 30 | 31 | aresponses.add(hostname, list_url, 'get', list_resp, match_querystring=True) 32 | aresponses.add(hostname, stream_url, 'get', stream_resp, match_querystring=True) 33 | 34 | events = [] 35 | async for event in continuous_watch(settings=settings, 36 | resource=resource, 37 | namespace=namespace, 38 | operator_pause_waiter=asyncio.Future()): 39 | events.append(event) 40 | 41 | assert len(events) == 5 42 | assert events[0]['object']['spec'] == 'a' 43 | assert events[1]['object']['spec'] == 'b' 44 | assert events[2] == Bookmark.LISTED 45 | assert events[3]['object']['spec'] == 'c' 46 | assert events[4]['object']['spec'] == 'd' 47 | -------------------------------------------------------------------------------- /tests/handling/daemons/test_timer_intervals.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import kopf 4 | 5 | # TODO: tests for idle= (more complicated) 6 | 7 | 8 | async def test_timer_regular_interval( 9 | resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle, frozen_time): 10 | caplog.set_level(logging.DEBUG) 11 | 12 | @kopf.timer(*resource, id='fn', interval=1.0, sharp=False) 13 | async def fn(**kwargs): 14 | dummy.mock() 15 | dummy.kwargs = kwargs 16 | dummy.steps['called'].set() 17 | frozen_time.tick(0.3) 18 | if dummy.mock.call_count >= 2: 19 | dummy.steps['finish'].set() 20 | kwargs['stopped']._setter.set() # to exit the cycle 21 | 22 | await simulate_cycle({}) 23 | await 
dummy.steps['called'].wait() 24 | await dummy.wait_for_daemon_done() 25 | 26 | assert dummy.mock.call_count == 2 27 | assert k8s_mocked.sleep.call_count == 2 28 | assert k8s_mocked.sleep.call_args_list[0][0][0] == 1.0 29 | assert k8s_mocked.sleep.call_args_list[1][0][0] == 1.0 30 | 31 | 32 | async def test_timer_sharp_interval( 33 | resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle, frozen_time): 34 | caplog.set_level(logging.DEBUG) 35 | 36 | @kopf.timer(*resource, id='fn', interval=1.0, sharp=True) 37 | async def fn(**kwargs): 38 | dummy.mock() 39 | dummy.kwargs = kwargs 40 | dummy.steps['called'].set() 41 | frozen_time.tick(0.3) 42 | if dummy.mock.call_count >= 2: 43 | dummy.steps['finish'].set() 44 | kwargs['stopped']._setter.set() # to exit the cycle 45 | 46 | await simulate_cycle({}) 47 | await dummy.steps['called'].wait() 48 | await dummy.steps['finish'].wait() 49 | await dummy.wait_for_daemon_done() 50 | 51 | assert dummy.mock.call_count == 2 52 | assert k8s_mocked.sleep.call_count == 2 53 | assert 0.7 <= k8s_mocked.sleep.call_args_list[0][0][0] < 0.71 54 | assert 0.7 <= k8s_mocked.sleep.call_args_list[1][0][0] < 0.71 55 | -------------------------------------------------------------------------------- /tests/k8s/test_list_objs.py: -------------------------------------------------------------------------------- 1 | import aiohttp.web 2 | import pytest 3 | 4 | from kopf._cogs.clients.errors import APIError 5 | from kopf._cogs.clients.fetching import list_objs 6 | from kopf._cogs.structs.credentials import LoginError 7 | 8 | 9 | async def test_listing_works( 10 | resp_mocker, aresponses, hostname, settings, logger, resource, namespace, 11 | cluster_resource, namespaced_resource): 12 | 13 | result = {'items': [{}, {}]} 14 | list_mock = resp_mocker(return_value=aiohttp.web.json_response(result)) 15 | cluster_url = cluster_resource.get_url(namespace=None) 16 | namespaced_url = namespaced_resource.get_url(namespace='ns') 17 | 
Besides the standard create-update-delete events, a per-field diff handler can be registered.
It is called only when the specified field changes,
with `old` & `new` set to that field's old and new values.
9 | 10 | Start the operator (we skip the verbose mode here, for clarity): 11 | 12 | ```bash 13 | kopf run example.py 14 | ``` 15 | 16 | Trigger the object creation and monitor the stderr of the operator: 17 | 18 | ```bash 19 | $ kubectl apply -f ../obj.yaml 20 | ``` 21 | 22 | ``` 23 | CREATED 1st 24 | [2019-02-05 20:33:50,336] kopf.handling [INFO ] [default/kopf-example-1] Handler create_fn_1 succeeded. 25 | CREATED 2nd 26 | [2019-02-05 20:33:50,557] kopf.handling [INFO ] [default/kopf-example-1] Handler create_fn_2 succeeded. 27 | [2019-02-05 20:33:50,781] kopf.handling [INFO ] [default/kopf-example-1] All handlers succeeded. 28 | ``` 29 | 30 | Now, trigger the object change: 31 | 32 | ```bash 33 | $ kubectl patch -f ../obj.yaml --type merge -p '{"spec": {"field": "newvalue", "newfield": 100}}' 34 | ``` 35 | 36 | ``` 37 | UPDATED 38 | [2019-02-05 20:34:06,358] kopf.handling [INFO ] [default/kopf-example-1] Handler update_fn succeeded. 39 | FIELD CHANGED: value -> newvalue 40 | [2019-02-05 20:34:06,682] kopf.handling [INFO ] [default/kopf-example-1] Handler field_fn/spec.field succeeded. 41 | [2019-02-05 20:34:06,903] kopf.handling [INFO ] [default/kopf-example-1] All handlers succeeded. 42 | ``` 43 | 44 | Finally, delete the object: 45 | 46 | ```bash 47 | $ kubectl delete -f ../obj.yaml 48 | ``` 49 | 50 | ``` 51 | DELETED 1st 52 | [2019-02-05 20:34:42,496] kopf.handling [INFO ] [default/kopf-example-1] Handler delete_fn_1 succeeded. 53 | DELETED 2nd 54 | [2019-02-05 20:34:42,715] kopf.handling [INFO ] [default/kopf-example-1] Handler delete_fn_2 succeeded. 55 | [2019-02-05 20:34:42,934] kopf.handling [INFO ] [default/kopf-example-1] All handlers succeeded. 
56 | ``` 57 | -------------------------------------------------------------------------------- /tests/settings/test_defaults.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import kopf 4 | 5 | 6 | async def test_declared_public_interface_and_promised_defaults(): 7 | settings = kopf.OperatorSettings() 8 | assert settings.posting.level == logging.INFO 9 | assert settings.peering.name == "default" 10 | assert settings.peering.stealth == False 11 | assert settings.peering.priority == 0 12 | assert settings.peering.lifetime == 60 13 | assert settings.peering.mandatory == False 14 | assert settings.peering.standalone == False 15 | assert settings.peering.namespaced == True 16 | assert settings.peering.clusterwide == False 17 | assert settings.watching.reconnect_backoff == 0.1 18 | assert settings.watching.connect_timeout is None 19 | assert settings.watching.server_timeout is None 20 | assert settings.watching.client_timeout is None 21 | assert settings.batching.worker_limit is None 22 | assert settings.batching.idle_timeout == 5.0 23 | assert settings.batching.exit_timeout == 2.0 24 | assert settings.batching.batch_window == 0.1 25 | assert settings.batching.error_delays == (1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610) 26 | assert settings.scanning.disabled == False 27 | assert settings.admission.server is None 28 | assert settings.admission.managed is None 29 | assert settings.execution.executor is not None 30 | assert settings.execution.max_workers is None 31 | assert settings.networking.request_timeout == 5 * 60 32 | assert settings.networking.connect_timeout is None 33 | 34 | 35 | async def test_peering_namespaced_is_modified_by_clusterwide(): 36 | settings = kopf.OperatorSettings() 37 | assert settings.peering.namespaced == True 38 | settings.peering.clusterwide = not settings.peering.clusterwide 39 | assert settings.peering.namespaced == False 40 | 41 | 42 | async def 
test_peering_clusterwide_is_modified_by_namespaced(): 43 | settings = kopf.OperatorSettings() 44 | assert settings.peering.clusterwide == False 45 | settings.peering.namespaced = not settings.peering.namespaced 46 | assert settings.peering.clusterwide == True 47 | -------------------------------------------------------------------------------- /tests/cli/test_preloading.py: -------------------------------------------------------------------------------- 1 | import kopf 2 | 3 | 4 | def test_nothing(invoke, real_run): 5 | result = invoke(['run']) 6 | assert result.exit_code == 0 7 | 8 | registry = kopf.get_default_registry() 9 | handlers = registry._changing.get_all_handlers() 10 | assert len(handlers) == 0 11 | 12 | 13 | def test_one_file(invoke, real_run): 14 | result = invoke(['run', 'handler1.py']) 15 | assert result.exit_code == 0 16 | 17 | registry = kopf.get_default_registry() 18 | handlers = registry._changing.get_all_handlers() 19 | assert len(handlers) == 1 20 | assert handlers[0].id == 'create_fn' 21 | 22 | 23 | def test_two_files(invoke, real_run): 24 | result = invoke(['run', 'handler1.py', 'handler2.py']) 25 | assert result.exit_code == 0 26 | 27 | registry = kopf.get_default_registry() 28 | handlers = registry._changing.get_all_handlers() 29 | assert len(handlers) == 2 30 | assert handlers[0].id == 'create_fn' 31 | assert handlers[1].id == 'update_fn' 32 | 33 | 34 | def test_one_module(invoke, real_run): 35 | result = invoke(['run', '-m', 'package.module_1']) 36 | assert result.exit_code == 0 37 | 38 | registry = kopf.get_default_registry() 39 | handlers = registry._changing.get_all_handlers() 40 | assert len(handlers) == 1 41 | assert handlers[0].id == 'create_fn' 42 | 43 | 44 | def test_two_modules(invoke, real_run): 45 | result = invoke(['run', '-m', 'package.module_1', '-m', 'package.module_2']) 46 | assert result.exit_code == 0 47 | 48 | registry = kopf.get_default_registry() 49 | handlers = registry._changing.get_all_handlers() 50 | assert 
len(handlers) == 2 51 | assert handlers[0].id == 'create_fn' 52 | assert handlers[1].id == 'update_fn' 53 | 54 | 55 | def test_mixed_sources(invoke, real_run): 56 | result = invoke(['run', 'handler1.py', '-m', 'package.module_2']) 57 | assert result.exit_code == 0 58 | 59 | registry = kopf.get_default_registry() 60 | handlers = registry._changing.get_all_handlers() 61 | assert len(handlers) == 2 62 | assert handlers[0].id == 'create_fn' 63 | assert handlers[1].id == 'update_fn' 64 | -------------------------------------------------------------------------------- /tests/handling/test_parametrization.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from unittest.mock import Mock 3 | 4 | import kopf 5 | from kopf._cogs.structs.ephemera import Memo 6 | from kopf._core.engines.indexing import OperatorIndexers 7 | from kopf._core.reactor.inventory import ResourceMemories 8 | from kopf._core.reactor.processing import process_resource_event 9 | 10 | 11 | async def test_parameter_is_passed_when_specified(resource, cause_mock, registry, settings): 12 | mock = Mock() 13 | 14 | # If it works for this handler, we assume it works for all of them. 15 | # Otherwise, it is too difficult to trigger the actual invocation. 
16 | @kopf.on.event(*resource, param=123) 17 | def fn(**kwargs): 18 | mock(**kwargs) 19 | 20 | event_queue = asyncio.Queue() 21 | await process_resource_event( 22 | lifecycle=kopf.lifecycles.all_at_once, 23 | registry=registry, 24 | settings=settings, 25 | resource=resource, 26 | indexers=OperatorIndexers(), 27 | memories=ResourceMemories(), 28 | memobase=Memo(), 29 | raw_event={'type': None, 'object': {}}, 30 | event_queue=event_queue, 31 | ) 32 | 33 | assert mock.called 34 | assert mock.call_args_list[0][1]['param'] == 123 35 | 36 | 37 | async def test_parameter_is_passed_even_if_not_specified(resource, cause_mock, registry, settings): 38 | mock = Mock() 39 | 40 | # If it works for this handler, we assume it works for all of them. 41 | # Otherwise, it is too difficult to trigger the actual invocation. 42 | @kopf.on.event(*resource) 43 | def fn(**kwargs): 44 | mock(**kwargs) 45 | 46 | event_queue = asyncio.Queue() 47 | await process_resource_event( 48 | lifecycle=kopf.lifecycles.all_at_once, 49 | registry=registry, 50 | settings=settings, 51 | resource=resource, 52 | indexers=OperatorIndexers(), 53 | memories=ResourceMemories(), 54 | memobase=Memo(), 55 | raw_event={'type': None, 'object': {}}, 56 | event_queue=event_queue, 57 | ) 58 | 59 | assert mock.called 60 | assert mock.call_args_list[0][1]['param'] is None 61 | -------------------------------------------------------------------------------- /tests/apis/test_iterjsonlines.py: -------------------------------------------------------------------------------- 1 | from unittest.mock import Mock 2 | 3 | from kopf._cogs.clients.api import iter_jsonlines 4 | 5 | 6 | async def test_empty_content(): 7 | async def iter_chunked(n: int): 8 | if False: # to make this function a generator 9 | yield b'' 10 | 11 | content = Mock(iter_chunked=iter_chunked) 12 | lines = [] 13 | async for line in iter_jsonlines(content): 14 | lines.append(line) 15 | 16 | assert lines == [] 17 | 18 | 19 | async def test_empty_chunk(): 20 | async 
def iter_chunked(n: int): 21 | yield b'' 22 | 23 | content = Mock(iter_chunked=iter_chunked) 24 | lines = [] 25 | async for line in iter_jsonlines(content): 26 | lines.append(line) 27 | 28 | assert lines == [] 29 | 30 | 31 | async def test_one_chunk_one_line(): 32 | async def iter_chunked(n: int): 33 | yield b'hello' 34 | 35 | content = Mock(iter_chunked=iter_chunked) 36 | lines = [] 37 | async for line in iter_jsonlines(content): 38 | lines.append(line) 39 | 40 | assert lines == [b'hello'] 41 | 42 | 43 | async def test_one_chunk_two_lines(): 44 | async def iter_chunked(n: int): 45 | yield b'hello\nworld' 46 | 47 | content = Mock(iter_chunked=iter_chunked) 48 | lines = [] 49 | async for line in iter_jsonlines(content): 50 | lines.append(line) 51 | 52 | assert lines == [b'hello', b'world'] 53 | 54 | 55 | async def test_one_chunk_empty_lines(): 56 | async def iter_chunked(n: int): 57 | yield b'\n\nhello\n\nworld\n\n' 58 | 59 | content = Mock(iter_chunked=iter_chunked) 60 | lines = [] 61 | async for line in iter_jsonlines(content): 62 | lines.append(line) 63 | 64 | assert lines == [b'hello', b'world'] 65 | 66 | 67 | async def test_a_few_chunks_split(): 68 | async def iter_chunked(n: int): 69 | yield b'\n\nhell' 70 | yield b'o\n\nwor' 71 | yield b'ld\n\n' 72 | 73 | content = Mock(iter_chunked=iter_chunked) 74 | lines = [] 75 | async for line in iter_jsonlines(content): 76 | lines.append(line) 77 | 78 | assert lines == [b'hello', b'world'] 79 | -------------------------------------------------------------------------------- /docs/idempotence.rst: -------------------------------------------------------------------------------- 1 | =========== 2 | Idempotence 3 | =========== 4 | 5 | Kopf provides tools to make the handlers idempotent. 6 | 7 | The :func:`kopf.register` function and the :func:`kopf.subhandler` decorator 8 | allow to schedule arbitrary sub-handlers for the execution in the current cycle. 
9 | 10 | The :func:`kopf.execute` coroutine executes arbitrary sub-handlers 11 | directly in the place of invocation, and returns when they all have succeeded. 12 | 13 | Every one of the sub-handlers is tracked by Kopf, and will not be executed 14 | twice within one handling cycle. 15 | 16 | .. code-block:: python 17 | 18 | import functools 19 | import kopf 20 | 21 | @kopf.on.create('kopfexamples') 22 | async def create(spec, namespace, **kwargs): 23 | print("Entering create()!") # executed ~7 times. 24 | await kopf.execute(fns={ 25 | 'a': create_a, 26 | 'b': create_b, 27 | }) 28 | print("Leaving create()!") # executed 1 time only. 29 | 30 | async def create_a(retry, **kwargs): 31 | if retry < 2: 32 | raise kopf.TemporaryError("Not ready yet.", delay=10) 33 | 34 | async def create_b(retry, **kwargs): 35 | if retry < 6: 36 | raise kopf.TemporaryError("Not ready yet.", delay=10) 37 | 38 | In this example, both ``create_a`` & ``create_b`` are submitted to Kopf 39 | as the sub-handlers of ``create`` on every attempt to execute it. 40 | This repeats every ~10 seconds until both of the sub-handlers succeed, 41 | and then the main handler succeeds too. 42 | 43 | The first one, ``create_a``, will succeed on the 3rd attempt after ~20s. 44 | The second one, ``create_b``, will succeed only on the 7th attempt after ~60s. 45 | 46 | However, although ``create_a`` will be submitted whenever ``create`` 47 | and ``create_b`` are retried, it will not be executed in the 20s..60s range, 48 | as it has succeeded already, and the record about this is stored on the object. 49 | 50 | This approach can be used to perform operations that need protection 51 | from double-execution, such as the creation of children objects with randomly 52 | generated names (e.g. Pods, Jobs, PersistentVolumeClaims, etc.). 53 | 54 | .. seealso:: 55 | :ref:`persistence`, :ref:`subhandlers`.
56 | -------------------------------------------------------------------------------- /examples/08-events/README.md: -------------------------------------------------------------------------------- 1 | # Kopf example with spy-handlers for the raw events 2 | 3 | Kopf stores its handler status on the objects' status field. 4 | This can be undesired when the objects do not belong to this operator, 5 | but are probably served by some other operator, and are just watched 6 | by the current operator, e.g. for their status fields. 7 | 8 | Event-watching handlers can be used as the silent spies on the raw events: 9 | they do not store anything on the object, and do not create the k8s-events. 10 | 11 | If the event handler fails, the error is logged to the operator's log, 12 | and then ignored. 13 | 14 | Please note that the event handlers are invoked for *every* event received 15 | from the watching stream. This also includes the first-time listing when 16 | the operator starts or restarts. It is the developer's responsibility to make 17 | the handlers idempotent (re-executable with no duplicated side-effects). 18 | 19 | Start the operator: 20 | 21 | ```bash 22 | kopf run example.py --verbose 23 | ``` 24 | 25 | Trigger the object creation and monitor the stderr of the operator: 26 | 27 | ```bash 28 | $ kubectl apply -f ../obj.yaml 29 | ``` 30 | 31 | Observe how the event-handlers are invoked. 32 | 33 | ``` 34 | [2019-05-28 11:03:29,537] kopf.reactor.handlin [DEBUG ] [default/kopf-example-1] Invoking handler 'event_fn_with_error'. 35 | [2019-05-28 11:03:29,537] kopf.reactor.handlin [ERROR ] [default/kopf-example-1] Handler 'event_fn_with_error' failed with an exception. Will ignore. 36 | Traceback (most recent call last): 37 | File ".../kopf/reactor/handling.py", line 159, in handle_event 38 | File ".../kopf/reactor/invocation.py", line 64, in invoke 39 | File "example.py", line 6, in event_fn_with_error 40 | raise Exception("Oops!") 41 | Exception: Oops!
42 | 43 | [2019-05-28 11:03:29,541] kopf.reactor.handlin [DEBUG ] [default/kopf-example-1] Invoking handler 'normal_event_fn'. 44 | Event received: {'type': 'ADDED', 'object': {'apiVersion': 'kopf.dev/v1', 'kind': 'KopfExample', ...} 45 | [2019-05-28 11:03:29,541] kopf.reactor.handlin [INFO ] [default/kopf-example-1] Handler 'normal_event_fn' succeeded. 46 | ``` 47 | 48 | Cleanup in the end: 49 | 50 | ```bash 51 | $ kubectl delete -f ../obj.yaml 52 | ``` 53 | -------------------------------------------------------------------------------- /tests/authentication/test_connectioninfo.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | 3 | from kopf._cogs.structs.credentials import ConnectionInfo, VaultKey 4 | 5 | 6 | def test_key_as_string(): 7 | key = VaultKey('some-key') 8 | assert isinstance(key, str) 9 | assert key == 'some-key' 10 | 11 | 12 | def test_creation_with_minimal_fields(): 13 | info = ConnectionInfo( 14 | server='https://localhost', 15 | ) 16 | assert info.server == 'https://localhost' 17 | assert info.ca_path is None 18 | assert info.ca_data is None 19 | assert info.insecure is None 20 | assert info.username is None 21 | assert info.password is None 22 | assert info.scheme is None 23 | assert info.token is None 24 | assert info.certificate_path is None 25 | assert info.certificate_data is None 26 | assert info.private_key_path is None 27 | assert info.private_key_data is None 28 | assert info.default_namespace is None 29 | assert info.expiration is None 30 | 31 | 32 | def test_creation_with_maximal_fields(): 33 | info = ConnectionInfo( 34 | server='https://localhost', 35 | ca_path='/ca/path', 36 | ca_data=b'ca_data', 37 | insecure=True, 38 | username='username', 39 | password='password', 40 | scheme='scheme', 41 | token='token', 42 | certificate_path='/cert/path', 43 | certificate_data=b'cert_data', 44 | private_key_path='/pkey/path', 45 | private_key_data=b'pkey_data', 46 | 
default_namespace='default', 47 | expiration=datetime.datetime.max, 48 | ) 49 | assert info.server == 'https://localhost' 50 | assert info.ca_path == '/ca/path' 51 | assert info.ca_data == b'ca_data' 52 | assert info.insecure is True 53 | assert info.username == 'username' 54 | assert info.password == 'password' 55 | assert info.scheme == 'scheme' 56 | assert info.token == 'token' 57 | assert info.certificate_path == '/cert/path' 58 | assert info.certificate_data == b'cert_data' 59 | assert info.private_key_path == '/pkey/path' 60 | assert info.private_key_data == b'pkey_data' 61 | assert info.default_namespace == 'default' 62 | assert info.expiration == datetime.datetime.max 63 | -------------------------------------------------------------------------------- /tests/diffs/test_protocols.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from kopf._cogs.structs.diffs import Diff, DiffItem, DiffOperation 4 | 5 | 6 | @pytest.mark.parametrize('operation', list(DiffOperation)) 7 | def test_operation_enum_behaves_as_string(operation: DiffOperation): 8 | assert isinstance(operation, str) 9 | assert operation == operation.value 10 | assert str(operation) == str(operation.value) 11 | assert repr(operation) == repr(operation.value) 12 | 13 | 14 | @pytest.mark.parametrize('operation', list(DiffOperation)) 15 | def test_item_has_all_expected_properties(operation): 16 | item = DiffItem(operation, ('field',), 'a', 'b') 17 | assert item.operation is operation 18 | assert item.op is operation 19 | assert item.field == ('field',) 20 | assert item.old == 'a' 21 | assert item.new == 'b' 22 | 23 | 24 | @pytest.mark.parametrize('operation', list(DiffOperation)) 25 | def test_item_comparison_to_tuple(operation): 26 | item = DiffItem(operation.value, (), 'a', 'b') 27 | assert item == (operation.value, (), 'a', 'b') 28 | 29 | 30 | @pytest.mark.parametrize('operation', list(DiffOperation)) 31 | def 
test_item_comparison_to_list(operation): 32 | item = DiffItem(operation.value, (), 'a', 'b') 33 | assert item == [operation.value, (), 'a', 'b'] 34 | 35 | 36 | @pytest.mark.parametrize('operation', list(DiffOperation)) 37 | def test_item_comparison_to_another_item(operation): 38 | item1 = DiffItem(operation.value, (), 'a', 'b') 39 | item2 = DiffItem(operation.value, (), 'a', 'b') 40 | assert item1 == item2 41 | 42 | 43 | # TODO: later implement it so that the order of items is irrelevant. 44 | def test_diff_comparison_to_the_same(): 45 | d1 = Diff([ 46 | DiffItem(DiffOperation.ADD , ('key1',), None, 'new1'), 47 | DiffItem(DiffOperation.CHANGE, ('key2',), 'old2', 'new2'), 48 | DiffItem(DiffOperation.REMOVE, ('key3',), 'old3', None), 49 | ]) 50 | d2 = Diff([ 51 | DiffItem(DiffOperation.ADD , ('key1',), None, 'new1'), 52 | DiffItem(DiffOperation.CHANGE, ('key2',), 'old2', 'new2'), 53 | DiffItem(DiffOperation.REMOVE, ('key3',), 'old3', None), 54 | ]) 55 | assert d1 == d2 56 | assert hash(d1) == hash(d2) 57 | assert d1 is not d2 58 | -------------------------------------------------------------------------------- /kopf/_cogs/helpers/hostnames.py: -------------------------------------------------------------------------------- 1 | import ipaddress 2 | import socket 3 | 4 | 5 | def get_descriptive_hostname() -> str: 6 | """ 7 | Look for non-numeric hostnames of the machine where the operator runs. 8 | 9 | The purpose is the host identification, not the actual host accessibility. 10 | 11 | Similar to :func:`socket.getfqdn`, but IPv6 pseudo-hostnames are excluded -- 12 | they are not helpful in identifying the actual host running the operator: 13 | e.g. "1.0.0...0.ip6.arpa".
14 | """ 15 | try: 16 | hostname, aliases, ipaddrs = socket.gethostbyaddr(socket.gethostname()) 17 | except OSError: 18 | pass 19 | else: 20 | ipv4: ipaddress.IPv4Address | None 21 | ipv6: ipaddress.IPv6Address | None 22 | parsed: list[tuple[str, ipaddress.IPv4Address | None, ipaddress.IPv6Address | None]] 23 | parsed = [] 24 | for name in [hostname] + list(aliases) + list(ipaddrs): 25 | try: 26 | ipv4 = ipaddress.IPv4Address(name) 27 | except ipaddress.AddressValueError: 28 | ipv4 = None 29 | try: 30 | ipv6 = ipaddress.IPv6Address(name) 31 | except ipaddress.AddressValueError: 32 | ipv6 = None 33 | parsed.append((name, ipv4, ipv6)) 34 | 35 | # Dotted hostname (fqdn) is always better, unless it is an ARPA-name or an IP-address. 36 | for name, ipv4, ipv6 in parsed: 37 | if '.' in name and not name.endswith('.arpa') and not ipv4 and not ipv6: 38 | return remove_useless_suffixes(name) 39 | 40 | # Non-dotted hostname is fine too, unless it is ARPA-name/IP-address or a localhost. 41 | for name, ipv4, ipv6 in parsed: 42 | if name != 'localhost' and not name.endswith('.arpa') and not ipv4 and not ipv6: 43 | return remove_useless_suffixes(name) 44 | 45 | return remove_useless_suffixes(socket.gethostname()) 46 | 47 | 48 | def remove_useless_suffixes(hostname: str) -> str: 49 | suffixes = ['.local', '.localdomain'] 50 | while any(hostname.endswith(suffix) for suffix in suffixes): 51 | for suffix in suffixes: 52 | hostname = hostname.removesuffix(suffix) 53 | return hostname 54 | -------------------------------------------------------------------------------- /kopf/_kits/loops.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import contextlib 3 | import sys 4 | from collections.abc import Iterator 5 | 6 | 7 | @contextlib.contextmanager 8 | def proper_loop(suggested_loop: asyncio.AbstractEventLoop | None = None) -> Iterator[asyncio.AbstractEventLoop | None]: 9 | """ 10 | Ensure that we have the proper loop, either 
suggested or properly managed. 11 | 12 | A "properly managed" loop is the one we own and therefore close. 13 | If ``uvloop`` is installed, it is used. 14 | Otherwise, the event loop policy remains unaffected. 15 | 16 | This loop manager is usually used in CLI only, not deeper than that; 17 | i.e. not even in ``kopf.run()``, since uvloop is only auto-managed for CLI. 18 | """ 19 | # Event loop policies were deprecated in 3.14 entirely. Yet they still exist in older versions. 20 | # However, the asyncio.Runner was introduced in Python 3.11, so we can use the logic from there. 21 | if suggested_loop is not None: 22 | yield suggested_loop 23 | 24 | elif sys.version_info >= (3, 11): # optional in 3.11-3.13, mandatory in >=3.14 25 | # Use uvloop if available by injecting it as the selected loop. 26 | try: 27 | import uvloop 28 | except ImportError: 29 | pass 30 | else: 31 | with asyncio.Runner(loop_factory=uvloop.new_event_loop) as runner: 32 | yield runner.get_loop() 33 | return 34 | 35 | # Use the default loop/runner in place, do not inject anything. 36 | yield None 37 | 38 | # For Python<=3.10, use the event-loop-policy-based injection. 
39 | else: 40 | original_policy = asyncio.get_event_loop_policy() 41 | if suggested_loop is None: # the pure CLI use, not a KopfRunner or other code 42 | try: 43 | import uvloop 44 | except ImportError: 45 | pass 46 | else: 47 | asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) 48 | 49 | try: 50 | yield 51 | 52 | finally: 53 | try: 54 | import uvloop 55 | except ImportError: 56 | pass 57 | else: 58 | asyncio.set_event_loop_policy(original_policy) 59 | -------------------------------------------------------------------------------- /tests/basic-structs/test_memos.py: -------------------------------------------------------------------------------- 1 | import collections.abc 2 | 3 | import pytest 4 | 5 | from kopf._cogs.structs.ephemera import Memo 6 | 7 | 8 | def test_creation_with_defaults(): 9 | obj = Memo() 10 | assert isinstance(obj, collections.abc.MutableMapping) 11 | assert not set(obj) 12 | 13 | 14 | def test_creation_with_dict(): 15 | obj = Memo({'xyz': 100}) 16 | assert isinstance(obj, collections.abc.MutableMapping) 17 | assert set(obj) == {'xyz'} 18 | 19 | 20 | def test_creation_with_list(): 21 | obj = Memo([('xyz', 100)]) 22 | assert isinstance(obj, collections.abc.MutableMapping) 23 | assert set(obj) == {'xyz'} 24 | 25 | 26 | def test_creation_with_memo(): 27 | obj = Memo(Memo({'xyz': 100})) 28 | assert isinstance(obj, collections.abc.MutableMapping) 29 | assert set(obj) == {'xyz'} 30 | 31 | 32 | def test_fields_are_keys(): 33 | obj = Memo() 34 | obj.xyz = 100 35 | assert obj['xyz'] == 100 36 | 37 | 38 | def test_keys_are_fields(): 39 | obj = Memo() 40 | obj['xyz'] = 100 41 | assert obj.xyz == 100 42 | 43 | 44 | def test_keys_deleted(): 45 | obj = Memo() 46 | obj['xyz'] = 100 47 | del obj['xyz'] 48 | assert obj == {} 49 | 50 | 51 | def test_fields_deleted(): 52 | obj = Memo() 53 | obj.xyz = 100 54 | del obj.xyz 55 | assert obj == {} 56 | 57 | 58 | def test_raises_key_errors_on_get(): 59 | obj = Memo() 60 | with pytest.raises(KeyError): 61 | 
obj['unexistent'] 62 | 63 | 64 | def test_raises_attribute_errors_on_get(): 65 | obj = Memo() 66 | with pytest.raises(AttributeError): 67 | obj.unexistent 68 | 69 | 70 | def test_raises_key_errors_on_del(): 71 | obj = Memo() 72 | with pytest.raises(KeyError): 73 | del obj['unexistent'] 74 | 75 | 76 | def test_raises_attribute_errors_on_del(): 77 | obj = Memo() 78 | with pytest.raises(AttributeError): 79 | del obj.unexistent 80 | 81 | 82 | def test_shallow_copied_keys(): 83 | obj1 = Memo({'xyz': 100}) 84 | obj2 = Memo(obj1) 85 | obj1['xyz'] = 200 86 | assert obj2['xyz'] == 100 87 | 88 | 89 | def test_shallow_copied_values(): 90 | obj1 = Memo({'xyz': 100}) 91 | obj2 = Memo(dat=obj1) 92 | obj1['xyz'] = 200 93 | assert obj2['dat']['xyz'] == 200 94 | -------------------------------------------------------------------------------- /tests/reactor/conftest.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import functools 3 | from unittest.mock import AsyncMock 4 | 5 | import pytest 6 | 7 | from kopf._cogs.clients.watching import infinite_watch 8 | from kopf._core.reactor.queueing import watcher, worker as original_worker 9 | 10 | 11 | @pytest.fixture(autouse=True) 12 | def _autouse_resp_mocker(resp_mocker): 13 | pass 14 | 15 | 16 | @pytest.fixture() 17 | def processor(): 18 | """ A mock for processor -- to be checked if the handler has been called. """ 19 | return AsyncMock() 20 | 21 | 22 | @pytest.fixture() 23 | def worker_spy(mocker): 24 | """ Spy on the watcher: actually call it, but provide the mock-fields. """ 25 | spy = AsyncMock(spec=original_worker, wraps=original_worker) 26 | return mocker.patch('kopf._core.reactor.queueing.worker', spy) 27 | 28 | 29 | @pytest.fixture() 30 | def worker_mock(mocker): 31 | """ Prevent the queue consumption, so that the queues could be checked. 
""" 32 | return mocker.patch('kopf._core.reactor.queueing.worker') 33 | 34 | 35 | @pytest.fixture() 36 | def watcher_limited(mocker, settings): 37 | """ Make event streaming finite, watcher exits after depletion. """ 38 | settings.watching.reconnect_backoff = 0 39 | mocker.patch('kopf._cogs.clients.watching.infinite_watch', 40 | new=functools.partial(infinite_watch, _iterations=1)) 41 | 42 | 43 | @pytest.fixture() 44 | async def watcher_in_background(settings, resource, worker_spy, stream): 45 | 46 | # Prevent remembering the streaming objects in the mocks. 47 | async def do_nothing(*args, **kwargs): 48 | pass 49 | 50 | # Prevent any real streaming for the very beginning, before it even starts. 51 | stream.feed([]) 52 | 53 | # Spawn a watcher in the background. 54 | coro = watcher( 55 | namespace=None, 56 | resource=resource, 57 | settings=settings, 58 | processor=do_nothing, 59 | ) 60 | task = asyncio.create_task(coro) 61 | 62 | try: 63 | # Go for a test. 64 | yield task 65 | finally: 66 | # Terminate the watcher to cleanup the loop. 67 | task.cancel() 68 | try: 69 | await task 70 | except asyncio.CancelledError: 71 | pass # cancellations are expected at this point 72 | -------------------------------------------------------------------------------- /kopf/_cogs/helpers/thirdparty.py: -------------------------------------------------------------------------------- 1 | """ 2 | Type definitions from optional 3rd-party libraries, e.g. pykube-ng & kubernetes. 3 | 4 | This utility does all the trickery needed to import the libraries if possible, 5 | or to skip them and make typing/runtime dummies for the rest of the codebase. 6 | """ 7 | import abc 8 | from typing import Any, Protocol, TypeAlias 9 | 10 | 11 | # Since client libraries are optional, support their objects only if they are installed. 12 | # If not installed, use a dummy class to miss all isinstance() checks for that library. 13 | class _dummy: pass 14 | 15 | 16 | # Do these imports look excessive? 
==> https://github.com/python/mypy/issues/10063 17 | # TL;DR: Strictly `from...import...as...`, AND strictly same-named (`X as X`). 18 | try: 19 | from pykube.objects import APIObject as APIObject 20 | PykubeObject = APIObject 21 | except ImportError: 22 | PykubeObject = _dummy 23 | 24 | try: 25 | from kubernetes.client import V1ObjectMeta as V1ObjectMeta, V1OwnerReference as V1OwnerReference 26 | except ImportError: 27 | V1ObjectMeta = V1OwnerReference = None 28 | 29 | 30 | class V1OwnerReferenceProtocol(Protocol): 31 | block_owner_deletion: bool 32 | controller: bool 33 | api_version: str 34 | kind: str 35 | name: str 36 | uid: str 37 | 38 | 39 | class V1ObjectMetaProtocol(Protocol): 40 | owner_references: list[V1OwnerReferenceProtocol] 41 | labels: dict[str, str] 42 | name: str 43 | namespace: str | None 44 | generate_name: str | None 45 | 46 | 47 | # Kubernetes client does not have any common base classes, its code is fully generated. 48 | # Only recognise classes from a specific module. Ignore all API/HTTP/auth-related tools. 49 | class KubernetesModel(abc.ABC): 50 | @classmethod 51 | def __subclasshook__(cls, subcls: Any) -> Any: # suppress types in this hack 52 | if cls is KubernetesModel: 53 | if any(C.__module__.startswith('kubernetes.client.models.') for C in subcls.__mro__): 54 | return True 55 | return NotImplemented 56 | 57 | @property 58 | def metadata(self) -> V1ObjectMetaProtocol | None: 59 | raise NotImplementedError 60 | 61 | @metadata.setter 62 | def metadata(self, _: V1ObjectMetaProtocol | None) -> None: 63 | raise NotImplementedError 64 | -------------------------------------------------------------------------------- /kopf/_core/actions/lifecycles.py: -------------------------------------------------------------------------------- 1 | """ 2 | A few simple lifecycles for the handlers. 
3 | 4 | New lifecycles can be implemented the same way: accept ``handlers`` 5 | in the order they are registered (except those already succeeded), 6 | and return the list of handlers in the order and amount to be executed. 7 | 8 | The default behaviour of the framework is the most simplistic: 9 | execute in the order they are registered, one by one. 10 | """ 11 | import logging 12 | import random 13 | from collections.abc import Sequence 14 | from typing import Any 15 | 16 | from kopf._core.actions import execution 17 | 18 | logger = logging.getLogger(__name__) 19 | 20 | Handlers = Sequence[execution.Handler] 21 | 22 | 23 | def all_at_once(handlers: Handlers, **_: Any) -> Handlers: 24 | """ Execute all handlers at once, in one event reaction cycle, if possible. """ 25 | return handlers 26 | 27 | 28 | def one_by_one(handlers: Handlers, **_: Any) -> Handlers: 29 | """ Execute handlers one at a time, in the order they were registered. """ 30 | return handlers[:1] 31 | 32 | 33 | def randomized(handlers: Handlers, **_: Any) -> Handlers: 34 | """ Execute one handler at a time, in the random order. """ 35 | return [random.choice(handlers)] if handlers else [] 36 | 37 | 38 | def shuffled(handlers: Handlers, **_: Any) -> Handlers: 39 | """ Execute all handlers at once, but in the random order. """ 40 | return random.sample(handlers, k=len(handlers)) if handlers else [] 41 | 42 | 43 | def asap(handlers: Handlers, *, state: execution.State, **_: Any) -> Handlers: 44 | """ Execute one handler at a time, skip on failure, try the next one, retry after the full cycle. 
""" 45 | 46 | def keyfn(handler: execution.Handler) -> int: 47 | return state[handler.id].retries or 0 48 | 49 | return sorted(handlers, key=keyfn)[:1] 50 | 51 | 52 | _default_lifecycle: execution.LifeCycleFn = asap 53 | 54 | 55 | def get_default_lifecycle() -> execution.LifeCycleFn: 56 | return _default_lifecycle 57 | 58 | 59 | def set_default_lifecycle(lifecycle: execution.LifeCycleFn | None) -> None: 60 | global _default_lifecycle 61 | if _default_lifecycle is not None: 62 | logger.warning(f"The default lifecycle is already set to {_default_lifecycle}, overriding it to {lifecycle}.") 63 | _default_lifecycle = lifecycle if lifecycle is not None else asap 64 | -------------------------------------------------------------------------------- /tests/primitives/test_containers.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | import pytest 4 | 5 | from kopf._cogs.aiokits.aiovalues import Container 6 | 7 | 8 | async def test_empty_by_default(): 9 | container = Container() 10 | with pytest.raises(asyncio.TimeoutError): 11 | await asyncio.wait_for(container.wait(), timeout=0.1) 12 | 13 | 14 | async def test_does_not_wake_up_when_reset(timer): 15 | container = Container() 16 | 17 | async def reset_it(): 18 | await container.reset() 19 | 20 | loop = asyncio.get_running_loop() 21 | loop.call_later(0.05, asyncio.create_task, reset_it()) 22 | 23 | with pytest.raises(asyncio.TimeoutError): 24 | await asyncio.wait_for(container.wait(), timeout=0.1) 25 | 26 | 27 | async def test_wakes_up_when_preset(timer): 28 | container = Container() 29 | await container.set(123) 30 | 31 | with timer: 32 | result = await container.wait() 33 | 34 | assert timer.seconds <= 0.1 35 | assert result == 123 36 | 37 | 38 | async def test_wakes_up_when_set(timer): 39 | container = Container() 40 | 41 | async def set_it(): 42 | await container.set(123) 43 | 44 | loop = asyncio.get_running_loop() 45 | loop.call_later(0.1, asyncio.create_task, 
set_it()) 46 | 47 | with timer: 48 | result = await container.wait() 49 | 50 | assert 0.1 <= timer.seconds <= 0.2 51 | assert result == 123 52 | 53 | 54 | async def test_iterates_when_set(timer): 55 | container = Container() 56 | 57 | async def set_it(v): 58 | await container.set(v) 59 | 60 | loop = asyncio.get_running_loop() 61 | loop.call_later(0.1, asyncio.create_task, set_it(123)) 62 | loop.call_later(0.2, asyncio.create_task, set_it(234)) 63 | 64 | values = [] 65 | with timer: 66 | async for value in container.as_changed(): 67 | values.append(value) 68 | if value == 234: 69 | break 70 | 71 | assert 0.2 <= timer.seconds <= 0.3 72 | assert values == [123, 234] 73 | 74 | 75 | async def test_iterates_when_preset(timer): 76 | container = Container() 77 | await container.set(123) 78 | 79 | values = [] 80 | with timer: 81 | async for value in container.as_changed(): 82 | values.append(value) 83 | break 84 | 85 | assert timer.seconds <= 0.1 86 | assert values == [123] 87 | -------------------------------------------------------------------------------- /tests/logging/test_loggers.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from kopf._cogs.structs.bodies import Body 4 | from kopf._core.actions.loggers import LocalObjectLogger, ObjectLogger 5 | 6 | 7 | # Async -- to make the log enqueueing loop running. 8 | @pytest.mark.parametrize('cls', [ObjectLogger, LocalObjectLogger]) 9 | async def test_mandatory_body(cls, settings, caplog): 10 | with pytest.raises(TypeError): 11 | cls(settings=settings) 12 | 13 | 14 | # Async -- to make the log enqueueing loop running. 15 | @pytest.mark.parametrize('cls', [ObjectLogger, LocalObjectLogger]) 16 | async def test_mandatory_settings(cls, settings, caplog): 17 | with pytest.raises(TypeError): 18 | cls(body=Body({})) 19 | 20 | 21 | # Async -- to make the log enqueueing loop running. 
22 | @pytest.mark.parametrize('cls', [ObjectLogger, LocalObjectLogger]) 23 | async def test_extras_from_metadata(cls, settings, caplog): 24 | body = Body({ 25 | 'kind': 'kind1', 26 | 'apiVersion': 'api1/v1', 27 | 'metadata': {'uid': 'uid1', 'name': 'name1', 'namespace': 'namespace1'}, 28 | }) 29 | 30 | logger = cls(body=body, settings=settings) 31 | logger.info("hello") 32 | 33 | assert len(caplog.records) == 1 34 | assert hasattr(caplog.records[0], 'k8s_ref') 35 | assert caplog.records[0].k8s_ref == { 36 | 'uid': 'uid1', 37 | 'name': 'name1', 38 | 'namespace': 'namespace1', 39 | 'apiVersion': 'api1/v1', 40 | 'kind': 'kind1', 41 | } 42 | 43 | 44 | # Async -- to make the log enqueueing loop running. 45 | @pytest.mark.parametrize('cls', [ObjectLogger]) 46 | async def test_k8s_posting_enabled_in_a_regular_logger(cls, settings, caplog): 47 | body = Body({}) 48 | 49 | logger = cls(body=body, settings=settings) 50 | logger.info("hello") 51 | 52 | assert len(caplog.records) == 1 53 | assert hasattr(caplog.records[0], 'k8s_skip') 54 | assert caplog.records[0].k8s_skip is False 55 | 56 | 57 | # Async -- to make the log enqueueing loop running. 
58 | @pytest.mark.parametrize('cls', [LocalObjectLogger]) 59 | async def test_k8s_posting_disabled_in_a_local_logger(cls, settings, caplog): 60 | body = Body({}) 61 | 62 | logger = cls(body=body, settings=settings) 63 | logger.info("hello") 64 | 65 | assert len(caplog.records) == 1 66 | assert hasattr(caplog.records[0], 'k8s_skip') 67 | assert caplog.records[0].k8s_skip is True 68 | -------------------------------------------------------------------------------- /tests/timing/test_sleeping.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | import pytest 4 | 5 | from kopf._cogs.aiokits.aiotime import sleep 6 | 7 | 8 | async def test_the_only_delay_is_awaited(timer): 9 | with timer: 10 | unslept = await sleep(0.10) 11 | assert 0.10 <= timer.seconds < 0.11 12 | assert unslept is None 13 | 14 | 15 | async def test_the_shortest_delay_is_awaited(timer): 16 | with timer: 17 | unslept = await sleep([0.10, 0.20]) 18 | assert 0.10 <= timer.seconds < 0.11 19 | assert unslept is None 20 | 21 | 22 | async def test_specific_delays_only_are_awaited(timer): 23 | with timer: 24 | unslept = await sleep([0.10, None]) 25 | assert 0.10 <= timer.seconds < 0.11 26 | assert unslept is None 27 | 28 | 29 | @pytest.mark.parametrize('delays', [ 30 | pytest.param([1000, -10], id='mixed-signs'), 31 | pytest.param([-100, -10], id='all-negative'), 32 | pytest.param(-10, id='alone'), 33 | ]) 34 | async def test_negative_delays_skip_sleeping(timer, delays): 35 | with timer: 36 | unslept = await sleep(delays) 37 | assert timer.seconds < 0.01 38 | assert unslept is None 39 | 40 | 41 | @pytest.mark.parametrize('delays', [ 42 | pytest.param([], id='empty-list'), 43 | pytest.param([None], id='list-of-none'), 44 | ]) 45 | async def test_no_delays_skip_sleeping(timer, delays): 46 | with timer: 47 | unslept = await sleep(delays) 48 | assert timer.seconds < 0.01 49 | assert unslept is None 50 | 51 | 52 | async def 
test_by_event_set_before_time_comes(timer): 53 | event = asyncio.Event() 54 | asyncio.get_running_loop().call_later(0.07, event.set) 55 | with timer: 56 | unslept = await sleep(0.10, event) 57 | assert unslept is not None 58 | assert 0.02 <= unslept <= 0.04 59 | assert 0.06 <= timer.seconds <= 0.08 60 | 61 | 62 | async def test_with_zero_time_and_event_initially_cleared(timer): 63 | event = asyncio.Event() 64 | event.clear() 65 | with timer: 66 | unslept = await sleep(0, event) 67 | assert timer.seconds <= 0.01 68 | assert unslept is None 69 | 70 | 71 | async def test_with_zero_time_and_event_initially_set(timer): 72 | event = asyncio.Event() 73 | event.set() 74 | with timer: 75 | unslept = await sleep(0, event) 76 | assert timer.seconds <= 0.01 77 | assert not unslept # 0/None; undefined for such case: both goals reached. 78 | -------------------------------------------------------------------------------- /tests/observation/test_processing_of_namespaces.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | import pytest 4 | 5 | from kopf._cogs.structs.bodies import RawBody, RawEvent 6 | from kopf._cogs.structs.references import Insights 7 | from kopf._core.reactor.observation import process_discovered_namespace_event 8 | 9 | 10 | async def test_initial_listing_is_ignored(): 11 | insights = Insights() 12 | e1 = RawEvent(type=None, object=RawBody(metadata={'name': 'ns1'})) 13 | 14 | async def delayed_injection(delay: float): 15 | await asyncio.sleep(delay) 16 | await process_discovered_namespace_event( 17 | insights=insights, raw_event=e1, namespaces=['ns*']) 18 | 19 | task = asyncio.create_task(delayed_injection(0)) 20 | with pytest.raises(asyncio.TimeoutError): 21 | async with insights.revised: 22 | await asyncio.wait_for(insights.revised.wait(), timeout=0.1) 23 | await task 24 | assert not insights.namespaces 25 | 26 | 27 | @pytest.mark.parametrize('etype', ['ADDED', 'MODIFIED']) 28 | async def 
import asyncio
import concurrent.futures
import threading
from typing import Any

from kopf._cogs.aiokits import aiotasks

Flag = aiotasks.Future | asyncio.Event | concurrent.futures.Future[Any] | threading.Event


async def wait_flag(
        flag: Flag | None,
) -> Any:
    """
    Wait until the flag is raised, and return whatever result it carries.

    Asyncio primitives are awaited in place; threading/concurrent primitives
    are waited for in the default executor so that the event loop stays free.
    A ``None`` flag is a no-op: the call returns immediately.
    """
    if flag is None:
        return None
    elif isinstance(flag, asyncio.Future):
        return await flag
    elif isinstance(flag, asyncio.Event):
        return await flag.wait()
    elif isinstance(flag, concurrent.futures.Future):
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(None, flag.result)
    elif isinstance(flag, threading.Event):
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(None, flag.wait)
    else:
        raise TypeError(f"Unsupported type of a flag: {flag!r}")


async def raise_flag(
        flag: Flag | None,
) -> None:
    """
    Raise the flag, thus waking up all of its waiters.

    Future-like flags get a ``None`` result; event-like flags are set.
    A ``None`` flag is a no-op.
    """
    if flag is None:
        return None
    elif isinstance(flag, asyncio.Future):
        flag.set_result(None)
    elif isinstance(flag, asyncio.Event):
        flag.set()
    elif isinstance(flag, concurrent.futures.Future):
        flag.set_result(None)
    elif isinstance(flag, threading.Event):
        flag.set()
    else:
        raise TypeError(f"Unsupported type of a flag: {flag!r}")


def check_flag(
        flag: Flag | None,
) -> bool | None:
    """
    Report whether the flag is currently raised (``None`` when no flag given).
    """
    if flag is None:
        return None
    elif isinstance(flag, asyncio.Future):
        return flag.done()
    elif isinstance(flag, asyncio.Event):
        return flag.is_set()
    elif isinstance(flag, concurrent.futures.Future):
        return flag.done()
    elif isinstance(flag, threading.Event):
        return flag.is_set()
    else:
        raise TypeError(f"Unsupported type of a flag: {flag!r}")
code-block:: python 9 | 10 | import kopf 11 | 12 | @kopf.on.create('kopfexamples') 13 | def create_kex_1(**_): 14 | return 100 15 | 16 | @kopf.on.create('kopfexamples') 17 | def create_kex_2(uid, **_): 18 | return {'r1': random.randint(0, 100), 'r2': random.randint(100, 999)} 19 | 20 | These results can be seen in the object's content: 21 | 22 | .. code-block:: console 23 | 24 | $ kubectl get -o yaml kex kopf-example-1 25 | 26 | .. code-block:: none 27 | 28 | ... 29 | status: 30 | create_kex_1: 100 31 | create_kex_2: 32 | r1: 66 33 | r2: 666 34 | 35 | The function results can be used to communicate between handlers through 36 | resource itself, assuming that handlers do not know in which order they 37 | will be invoked (due to error handling and retrying), and to be able to 38 | restore in case of operator failures & restarts: 39 | 40 | .. code-block:: python 41 | 42 | import kopf 43 | import pykube 44 | 45 | @kopf.on.create('kopfexamples') 46 | def create_job(status, **_): 47 | if not status.get('create_pvc', {}): 48 | raise kopf.TemporaryError("PVC is not created yet.", delay=10) 49 | 50 | pvc_name = status['create_pvc']['name'] 51 | 52 | api = pykube.HTTPClient(pykube.KubeConfig.from_env()) 53 | obj = pykube.Job(api, {...}) # use pvc_name here 54 | obj.create() 55 | return {'name': obj.name} 56 | 57 | @kopf.on.create('kopfexamples') 58 | def create_pvc(**_): 59 | api = pykube.HTTPClient(pykube.KubeConfig.from_env()) 60 | obj = pykube.PersistentVolumeClaim(api, {...}) 61 | obj.create() 62 | return {'name': obj.name} 63 | 64 | .. note:: 65 | 66 | In this example, the handlers are *intentionally* put in such an order 67 | that the first handler always fails on the first attempt. Having them 68 | in the proper order (PVC first, Job afterwards) will make it work smoothly 69 | for most of the cases, until PVC creation fails for any temporary reason 70 | and has to be retried. 
def test_resource_lifecycle():
    # An end-to-end smoke test: runs a real operator subprocess against
    # the cluster that kubectl is currently configured for. The autouse
    # fixtures above ensure the CRD exists and the object is absent.

    # To prevent lengthy threads in the loop executor when the process exits.
    settings = kopf.OperatorSettings()
    settings.watching.server_timeout = 10

    # Run an operator and simulate some activity with the operated resource.
    with kopf.testing.KopfRunner(
            ['run', '--all-namespaces', '--verbose', '--standalone', example_py],
            timeout=60, settings=settings,
    ) as runner:

        # NOTE(review): the sleeps are timing-based, not event-based; 5s/1s
        # presumably suffice for the operator to react — may flake on slow
        # clusters. TODO: confirm or switch to log-polling.
        subprocess.run(f"kubectl create -f {obj_yaml}",
                       shell=True, check=True, timeout=10, capture_output=True)
        time.sleep(5)  # give it some time to react
        subprocess.run(f"kubectl delete -f {obj_yaml}",
                       shell=True, check=True, timeout=10, capture_output=True)
        time.sleep(1)  # give it some time to react

    # Ensure that the operator did not die on start, or during the operation.
    assert runner.exception is None
    assert runner.exit_code == 0

    # There are usually more than these messages, but we only check for the certain ones.
    assert '[default/kopf-example-1] Creation is in progress:' in runner.output
    assert '[default/kopf-example-1] Something was logged here.' in runner.output
27 | 28 | registry = kopf.get_default_registry() 29 | cause = cause_factory(resource=resource, reason=reason, initial=True) 30 | 31 | @kopf.on.resume(*resource) 32 | def fn(**_): 33 | pass 34 | 35 | handlers = registry._changing.get_handlers(cause) 36 | assert len(handlers) == 1 37 | assert handlers[0].fn is fn 38 | 39 | 40 | @pytest.mark.parametrize('reason', [Reason.DELETE]) 41 | def test_resumes_ignored_for_initial_deletions_by_default( 42 | reason, cause_factory, resource): 43 | 44 | registry = kopf.get_default_registry() 45 | cause = cause_factory(resource=resource, reason=reason, initial=True, 46 | body={'metadata': {'deletionTimestamp': '...'}}) 47 | 48 | @kopf.on.resume(*resource) 49 | def fn(**_): 50 | pass 51 | 52 | handlers = registry._changing.get_handlers(cause) 53 | assert len(handlers) == 0 54 | 55 | 56 | @pytest.mark.parametrize('reason', [Reason.DELETE]) 57 | def test_resumes_selected_for_initial_deletions_when_explicitly_marked( 58 | reason, cause_factory, resource): 59 | 60 | registry = kopf.get_default_registry() 61 | cause = cause_factory(resource=resource, reason=reason, initial=True, 62 | body={'metadata': {'deletionTimestamp': '...'}}) 63 | 64 | @kopf.on.resume(*resource, deleted=True) 65 | def fn(**_): 66 | pass 67 | 68 | handlers = registry._changing.get_handlers(cause) 69 | assert len(handlers) == 1 70 | assert handlers[0].fn is fn 71 | -------------------------------------------------------------------------------- /tests/test_filtering_helpers.py: -------------------------------------------------------------------------------- 1 | import kopf 2 | 3 | 4 | def _never1(*_, **__): 5 | return False 6 | 7 | 8 | def _never2(*_, **__): 9 | return False 10 | 11 | 12 | def _always1(*_, **__): 13 | return True 14 | 15 | 16 | def _always2(*_, **__): 17 | return True 18 | 19 | 20 | def test_notfn_when_true(): 21 | combined = kopf.not_(_always1) 22 | result = combined() 23 | assert result is False 24 | 25 | 26 | def test_notfn_when_false(): 27 | 
# kopf.all_() combines callbacks like the builtin all(): it passes only when
# every callback passes, and is vacuously true for an empty callback list.


def test_allfn_when_all_are_true():
    combined = kopf.all_([_always1, _always2])
    result = combined()
    assert result is True


def test_allfn_when_one_is_false():
    combined = kopf.all_([_always1, _never1])
    result = combined()
    assert result is False


def test_allfn_when_all_are_false():
    combined = kopf.all_([_never1, _never2])
    result = combined()
    assert result is False


def test_allfn_when_no_functions():
    # Vacuous truth, same as all([]) == True.
    combined = kopf.all_([])
    result = combined()
    assert result is True


# kopf.any_() combines callbacks like the builtin any(): one passing callback
# is enough; an empty callback list yields False.


def test_anyfn_when_all_are_true():
    combined = kopf.any_([_always1, _always2])
    result = combined()
    assert result is True


def test_anyfn_when_one_is_false():
    combined = kopf.any_([_always1, _never1])
    result = combined()
    assert result is True


def test_anyfn_when_all_are_false():
    combined = kopf.any_([_never1, _never2])
    result = combined()
    assert result is False


def test_anyfn_when_no_functions():
    # Same as any([]) == False.
    combined = kopf.any_([])
    result = combined()
    assert result is False


# kopf.none_() is the negation of any_(): it passes only when no callback does.


def test_nonefn_when_all_are_true():
    combined = kopf.none_([_always1, _always2])
    result = combined()
    assert result is False


def test_nonefn_when_one_is_false():
    combined = kopf.none_([_always1, _never1])
    result = combined()
    assert result is False


def test_nonefn_when_all_are_false():
    combined = kopf.none_([_never1, _never2])
    result = combined()
    assert result is True


def test_nonefn_when_no_functions():
    combined = kopf.none_([])
    result = combined()
    assert result is True
def test_escaping_of_key():
    # JSON-Pointer escaping (RFC 6901): '~' becomes '~0' and '/' becomes '~1',
    # so literal '~'/'/' characters in a key are not confused with separators.
    patch = Patch()
    patch['~xyz/test'] = {'abc': None}
    jsonpatch = patch.as_json_patch()
    assert jsonpatch == [
        {'op': 'remove', 'path': '/~0xyz~1test/abc'}
    ]


def test_recursive_escape_of_key():
    # Escaping must not be applied twice: a pre-existing '~0' in a key becomes
    # '~00' (only the '~' is escaped to '~0'; the following '0' stays as-is).
    patch = Patch()
    patch['x/y/~z'] = {'a/b/~0c': None}
    jsonpatch = patch.as_json_patch()
    assert jsonpatch == [
        {'op': 'remove', 'path': '/x~1y~1~0z/a~1b~1~00c'},
    ]
def some_fn(x=None):
    # A real function (not a Mock) to be stored as the handler's fn;
    # per the note above, it must be global-scoped for a stable qualname.
    pass


@pytest.fixture()
def callback():
    # A callback that reports a successful match; the tests assert on `.called`
    # to verify whether the matching machinery invoked it at all.
    mock = Mock()
    mock.return_value = True
    return mock


@pytest.fixture(params=['annotations', 'labels', 'value', 'when'])
def handler(request, callback, selector):
    # A handler where exactly one filtering criterion (the fixture param) is
    # a callable: a per-key callback for annotations/labels, or the whole
    # `value`/`when` criterion replaced by the callback.
    handler = WatchingHandler(
        selector=selector,
        annotations={'known': 'value'},
        labels={'known': 'value'},
        field=parse_field('spec.field'),
        value='value',
        when=None,
        fn=some_fn, id='a', param=None, errors=None, timeout=None, retries=None, backoff=None,
    )
    if request.param in ['annotations', 'labels']:
        handler = dataclasses.replace(handler, **{request.param: {'known': callback}})
    else:
        handler = dataclasses.replace(handler, **{request.param: callback})
    return handler


@pytest.fixture()
def cause(cause_factory, callback):
    # A cause whose body satisfies all of the handler's plain-value criteria.
    return cause_factory(
        cls=WatchingCause,
        body=Body(dict(
            metadata=dict(
                labels={'known': 'value'},
                annotations={'known': 'value'},
            ),
            spec=dict(
                field='value',
            ),
        )))


@pytest.mark.parametrize('match_fn', [match, prematch])
def test_callback_is_called_with_matching_resource(
        match_fn, callback, handler, cause,
):
    result = match_fn(handler=handler, cause=cause)
    assert result
    assert callback.called


@pytest.mark.parametrize('match_fn', [match, prematch])
def test_callback_is_not_called_with_mismatching_resource(
        match_fn, callback, handler, cause,
):
    # A mismatching resource makes the handler not match at all;
    # the criterion callbacks are then never invoked (asserted below).
    cause = dataclasses.replace(cause, resource=Resource(group='x', version='y', plural='z'))
    result = match_fn(handler=handler, cause=cause)
    assert not result
    assert not callback.called
example with multiple processes and development mode 2 | 3 | When multiple operators start for the same cluster (in the cluster or outside), 4 | they become aware about each other, and exchange the basic information about 5 | their liveness and the priorities, and cooperate to avoid the undesired 6 | side-effects (e.g., duplicated children creation, infinite cross-changes). 7 | 8 | The main use-case for this is the development mode: when a developer starts 9 | an operator on their workstation, all the deployed operators should pause 10 | and stop processing of the objects, until the developer's operator exits. 11 | 12 | In shell A, start an operator: 13 | 14 | ```bash 15 | kopf run example.py --verbose 16 | ``` 17 | 18 | In shell B, start another operator: 19 | 20 | ```bash 21 | kopf run example.py --verbose 22 | ``` 23 | 24 | Notice how both A & B complain about the same-priority sibling operator: 25 | 26 | ``` 27 | [2019-02-05 20:42:39,052] kopf.peering [WARNING ] Possibly conflicting operators with the same priority: [Peer(089e5a18a71d4660b07ae37acc776250, priority=0, lastseen=2019-02-05 19:42:38.932613, lifetime=0:01:00)]. 28 | ``` 29 | 30 | ``` 31 | [2019-02-05 20:42:39,223] kopf.peering [WARNING ] Possibly conflicting operators with the same priority: [Peer(590581cbceff403e90a3e874379c4daf, priority=0, lastseen=2019-02-05 19:42:23.241150, lifetime=0:01:00)]. 32 | ``` 33 | 34 | Now, stop the operator B wtih Ctrl+C (twice), and start it with `--dev` option 35 | (equivalent to `--priority 666`): 36 | 37 | ```bash 38 | kopf run example.py --verbose --dev 39 | ``` 40 | 41 | Observe how the operator A pauses and lets 42 | operator B to take control over the objects. 43 | 44 | ``` 45 | [2019-02-05 20:43:40,360] kopf.peering [INFO ] Pausing operations in favour of [Peer(54e7054f28d948c4985db79410c9ef4a, priority=666, lastseen=2019-02-05 19:43:40.166561, lifetime=0:01:00)]. 46 | ``` 47 | 48 | Stop the operator B again with Ctrl+C (twice). 
async def test_pausing_is_ignored_if_turned_off(
        resource, namespace, timer, caplog, assert_logs):
    # With the only toggle turned off, the ToggleSet(any) is "off" overall,
    # so streaming_block() must pass through without pausing the stream.
    caplog.set_level(logging.DEBUG)

    operator_paused = ToggleSet(any)
    await operator_paused.make_toggle(False)

    with timer:
        async with streaming_block(
                resource=resource,
                namespace=namespace,
                operator_paused=operator_paused,
        ):
            pass

    assert timer.seconds < 0.2  # no waits, exits as soon as possible
    assert_logs([], prohibited=[
        r"Pausing the watch-stream for",
        r"Resuming the watch-stream for",
    ])
| 47 | with pytest.raises(asyncio.TimeoutError), timer: 48 | await asyncio.wait_for(do_it(), timeout=0.5) 49 | 50 | assert timer.seconds >= 0.5 51 | assert_logs([ 52 | r"Pausing the watch-stream for", 53 | ], prohibited=[ 54 | r"Resuming the watch-stream for", 55 | ]) 56 | 57 | 58 | async def test_pausing_waits_until_resumed( 59 | resource, namespace, timer, caplog, assert_logs): 60 | caplog.set_level(logging.DEBUG) 61 | 62 | operator_paused = ToggleSet(any) 63 | conflicts_found = await operator_paused.make_toggle(True) 64 | 65 | async def delayed_resuming(delay: float): 66 | await asyncio.sleep(delay) 67 | await conflicts_found.turn_to(False) 68 | 69 | with timer: 70 | asyncio.create_task(delayed_resuming(0.2)) 71 | async with streaming_block( 72 | resource=resource, 73 | namespace=namespace, 74 | operator_paused=operator_paused, 75 | ): 76 | pass 77 | 78 | assert timer.seconds >= 0.2 79 | assert timer.seconds <= 0.5 80 | assert_logs([ 81 | r"Pausing the watch-stream for", 82 | r"Resuming the watch-stream for", 83 | ]) 84 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature-request.yaml: -------------------------------------------------------------------------------- 1 | name: Feature Request 2 | description: Suggest an idea for this project 3 | labels: [enhancement, triage] 4 | body: 5 | 6 | - type: markdown 7 | attributes: 8 | value: > 9 | Please provide as much information as possible. 10 | All fields are optional, but a lack of information 11 | may result in a delayed response and time-consumung iterations. 12 | 13 | - type: markdown 14 | attributes: 15 | value: > 16 | _If you feel confident with English, please use English. 17 | If not, feel free to use your native or preferred language 18 | (avoid metaphors and idioms — they do not auto-translate well). 
19 | The answers will be in English._ 20 | 21 | - type: textarea 22 | id: problem 23 | attributes: 24 | label: Problem 25 | description: > 26 | What problem do you currently face so that you want this feature? 27 | Are there existing features or tools close to solving this problem? 28 | Why don't they work? 29 | placeholder: > 30 | E.g.: I want to access several in-memory indicies anywhere in the code 31 | nested in multiple levels of function calls. Currently, I have to pass 32 | the indicies from the handlers down the stack in every call, 33 | which complicates the code and makes it too wordy. 34 | 35 | - type: textarea 36 | id: proposal 37 | attributes: 38 | label: Proposal 39 | description: > 40 | Describe the solution you would like to have. 41 | Are there any other ways of achieving the same goal? 42 | Why is this proposal better than those alternatives? 43 | placeholder: > 44 | E.g.: Either store the indicies in global variables, 45 | or pass a single kwarg with all indicies at once, 46 | not as separate kwargs. 47 | 48 | - type: textarea 49 | id: code 50 | attributes: 51 | label: Code 52 | description: > 53 | A code snippet showing the new feature in action, at least as an idea. 54 | (No backticks — the code will be formatted automatically.) 55 | placeholder: | 56 | # E.g.: 57 | import kopf 58 | 59 | my_index = kopf.Index() 60 | 61 | @kopf.index('pods', target=my_index) 62 | def fn(**_): 63 | ... 64 | 65 | def any_function(): 66 | for key, val in my_index.items(): 67 | ... 68 | 69 | - type: textarea 70 | id: extra 71 | attributes: 72 | label: Additional information 73 | description: > 74 | Additional information in free form — everything you would like to add. 
import logging

import pytest

import kopf

# We assume that the handler filtering is tested in detail elsewhere (for all handlers).
# Here, we only test if it is applied or not applied.


async def test_timer_filtration_satisfied(
        settings, resource, dummy, caplog, assert_logs, k8s_mocked, simulate_cycle):
    caplog.set_level(logging.DEBUG)

    # The timer's filters require: label a=value, b present, c absent;
    # annotation x=value, y present, z absent.
    @kopf.timer(*resource, id='fn',
                labels={'a': 'value', 'b': kopf.PRESENT, 'c': kopf.ABSENT},
                annotations={'x': 'value', 'y': kopf.PRESENT, 'z': kopf.ABSENT})
    async def fn(**kwargs):
        dummy.kwargs = kwargs
        dummy.steps['called'].set()

    # A body that satisfies every criterion above must trigger the timer.
    event_body = {'metadata': {'labels': {'a': 'value', 'b': '...'},
                               'annotations': {'x': 'value', 'y': '...'},
                               'finalizers': [settings.persistence.finalizer]}}
    await simulate_cycle(event_body)

    await dummy.steps['called'].wait()
    await dummy.wait_for_daemon_done()


@pytest.mark.parametrize('labels, annotations', [
    # Annotations mismatching (but labels are matching).
    # FIX: the first case used to have a stray 'b' key (copy-pasted from the
    # labels dict) instead of 'y', so it failed both for the wrong x-value AND
    # the missing y, and never isolated the condition it claims to test.
    ({'a': 'value', 'b': '...'}, {'x': 'mismatching-value', 'y': '...'}),  # x must be "value"
    ({'a': 'value', 'b': '...'}, {'x': 'value', 'y': '...', 'z': '...'}),  # z must be absent
    ({'a': 'value', 'b': '...'}, {'x': 'value'}),  # y must be present
    # Labels mismatching (but annotations are matching):
    ({'a': 'mismatching-value', 'b': '...'}, {'x': 'value', 'y': '...'}),  # a must be "value"
    ({'a': 'value', 'b': '...', 'c': '...'}, {'x': 'value', 'y': '...'}),  # c must be absent
    ({'a': 'value'}, {'x': 'value', 'y': '...'}),  # b must be present
])
async def test_timer_filtration_mismatched(
        settings, resource, mocker, labels, annotations,
        caplog, assert_logs, k8s_mocked, simulate_cycle):
    caplog.set_level(logging.DEBUG)
    spawn_daemons = mocker.patch('kopf._core.engines.daemons.spawn_daemons')

    @kopf.timer(*resource, id='fn',
                labels={'a': 'value', 'b': kopf.PRESENT, 'c': kopf.ABSENT},
                annotations={'x': 'value', 'y': kopf.PRESENT, 'z': kopf.ABSENT})
    async def fn(**kwargs):
        pass

    event_body = {'metadata': {'labels': labels,
                               'annotations': annotations,
                               'finalizers': [settings.persistence.finalizer]}}
    await simulate_cycle(event_body)

    # Spawning still happens on every cycle, but with an empty list of
    # matching handlers for a mismatching body.
    assert spawn_daemons.called
    assert spawn_daemons.call_args_list[0][1]['handlers'] == []
19 | The answers will be in English._ 20 | 21 | - type: textarea 22 | id: summary 23 | attributes: 24 | label: Long story short 25 | description: > 26 | Please describe your problem in 1-3 sentences. 27 | What has happened? What has not happened, but should have? 28 | placeholder: > 29 | A feature X behaves "this" way, but expected to behave "that" way. 30 | The misbehaviour leads to unexpected results or side-effects A, B, C. 31 | 32 | - type: input 33 | id: kopf-version 34 | attributes: 35 | label: Kopf version 36 | placeholder: e.g. 1.31.2 37 | - type: input 38 | id: kubernetes-version 39 | attributes: 40 | label: Kubernetes version 41 | placeholder: e.g. 1.22 or 1.22.0 42 | - type: input 43 | id: python-version 44 | attributes: 45 | label: Python version 46 | placeholder: e.g. 3.10 or pypy-3.10-7.3.13 47 | 48 | - type: textarea 49 | id: code 50 | attributes: 51 | label: Code 52 | description: > 53 | The code snippet of the operator to reproduce the issue. 54 | (No backticks — the code will be formatted automatically.) 55 | placeholder: | 56 | # For example: 57 | import kopf 58 | 59 | @kopf.on.create('kopfexamples') 60 | def create_fn(**_): 61 | pass 62 | render: python 63 | 64 | - type: textarea 65 | id: logs 66 | attributes: 67 | label: Logs 68 | description: > 69 | The output that highlights the failure of the operator 70 | and shows what happened immediately before and after. 71 | (No backticks — the logs will be formatted automatically.) 72 | placeholder: | 73 | [2020-01-01 12:34:56,789] [DEBUG ] Starting Kopf 1.31.1. 74 | [2020-01-01 12:34:56,890] [DEBUG ] ... 75 | render: none 76 | 77 | - type: textarea 78 | id: extra 79 | attributes: 80 | label: Additional information 81 | description: > 82 | Everything you would like to add that can help to identify the issue. 83 | --------------------------------------------------------------------------------