├── .coveragerc ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ ├── feature_request.md │ └── user_story.md ├── dependabot.yml ├── pull_request_template.md └── workflows │ ├── build-library.yml │ ├── lint-code.yml │ ├── publish-document.yml │ └── publish-library.yml ├── .gitignore ├── .isort.cfg ├── .pre-commit-config.yaml ├── .prettierignore ├── .prettierrc.yaml ├── CODEOWNERS ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── SECURITY.md ├── docs ├── API References.md ├── architecture.drawio.png ├── architecture.md └── index.md ├── mkdocs.yml ├── oper8 ├── __init__.py ├── __main__.py ├── cmd │ ├── __init__.py │ ├── base.py │ ├── check_heartbeat.py │ ├── run_operator_cmd.py │ └── setup_vcs_cmd.py ├── component.py ├── config │ ├── __init__.py │ ├── config.py │ ├── config.yaml │ ├── config_validation.yaml │ └── validation.py ├── constants.py ├── controller.py ├── dag │ ├── __init__.py │ ├── completion_state.py │ ├── graph.py │ ├── node.py │ └── runner.py ├── decorator.py ├── deploy_manager │ ├── __init__.py │ ├── base.py │ ├── dry_run_deploy_manager.py │ ├── kube_event.py │ ├── openshift_deploy_manager.py │ ├── owner_references.py │ └── replace_utils.py ├── exceptions.py ├── log_format.py ├── managed_object.py ├── patch.py ├── patch_strategic_merge.py ├── reconcile.py ├── rollout_manager.py ├── session.py ├── setup_vcs.py ├── status.py ├── temporary_patch │ ├── temporary_patch_component.py │ └── temporary_patch_controller.py ├── test_helpers │ ├── __init__.py │ ├── data │ │ ├── controller.template │ │ ├── test_ca.crt │ │ └── test_ca.key │ ├── helpers.py │ ├── kub_mock.py │ ├── oper8x_helpers.py │ └── pwm_helpers.py ├── utils.py ├── vcs.py ├── verify_resources.py ├── version.py ├── watch_manager │ ├── __init__.py │ ├── ansible_watch_manager │ │ ├── __init__.py │ │ ├── ansible_watch_manager.py │ │ ├── modules │ │ │ ├── k8s_application.py │ │ │ └── log_rotator.py │ │ └── resources │ │ │ ├── playbook-base.yaml │ │ │ └── roles │ │ │ └── oper8_app │ │ │ ├── defaults │ │ │ └── main.yml │ │ │ └── tasks │ │ │ └── main.yml │ ├── base.py │ ├── dry_run_watch_manager.py │ └── python_watch_manager │ │ ├── __init__.py │ │ ├── filters │ │ ├── __init__.py │ │ ├── common.py │ │ ├── filters.py │ │ └── manager.py │ │ ├── leader_election │ │ ├── __init__.py │ │ ├── annotation.py │ │ ├── base.py │ │ ├── dry_run.py │ │ ├── lease.py │ │ └── life.py │ │ ├── python_watch_manager.py │ │ ├── reconcile_process_entrypoint.py │ │ ├── threads │ │ ├── __init__.py │ │ ├── base.py │ │ ├── heartbeat.py │ │ ├── reconcile.py │ │ ├── timer.py │ │ └── watch.py │ │ └── utils │ │ ├── __init__.py │ │ ├── common.py │ │ ├── constants.py │ │ ├── log_handler.py │ │ └── types.py └── x │ ├── __init__.py │ ├── datastores │ ├── __init__.py │ ├── connection_base.py │ ├── cos │ │ ├── __init__.py │ │ ├── connection.py │ │ ├── factory.py │ │ └── interfaces.py │ ├── factory_base.py │ ├── interfaces.py │ ├── postgres │ │ ├── __init__.py │ │ ├── connection.py │ │ ├── factory.py │ │ └── interfaces.py │ └── redis │ │ ├── __init__.py │ │ ├── connection.py │ │ ├── factory.py │ │ └── interfaces.py │ ├── oper8x_component.py │ └── utils │ ├── __init__.py │ ├── abc_static.py │ ├── common.py │ ├── constants.py │ ├── deps_annotation.py │ ├── tls.py │ └── tls_context │ ├── __init__.py │ ├── factory.py │ ├── interface.py │ ├── internal.py │ └── public.py ├── pyproject.toml ├── renovate.json ├── scripts ├── check_heartbeat.sh ├── document.sh ├── fmt.sh ├── lint.sh └── run_tests.sh ├── setup_requirements.txt ├── tests ├── __init__.py 
├── conftest.py ├── dag │ ├── test_completion_state.py │ ├── test_graph.py │ ├── test_node.py │ └── test_runner.py ├── deploy_manager │ ├── test_dry_run_deploy_manager.py │ ├── test_openshift_deploy_manager.py │ ├── test_owner_references.py │ └── test_replace_utils.py ├── temporary_patch │ ├── test_temporary_patch_component.py │ └── test_temporary_patch_controller.py ├── test_component.py ├── test_config.py ├── test_controller.py ├── test_decorator.py ├── test_exceptions.py ├── test_main.py ├── test_patch.py ├── test_patch_strategic_merge.py ├── test_reconcile.py ├── test_rollout_manager.py ├── test_session.py ├── test_setup_vcs.py ├── test_status.py ├── test_utils.py ├── test_vcs.py ├── test_verify_resources.py ├── watch_manager │ ├── __init__.py │ ├── ansible_watch_manager │ │ ├── modules │ │ │ ├── test_k8s_application.py │ │ │ └── test_log_rotator.py │ │ └── test_ansible_watch_manager.py │ ├── python_watch_manager │ │ ├── filters │ │ │ ├── test_common_filters.py │ │ │ ├── test_filters.py │ │ │ └── test_manager.py │ │ ├── leader_election │ │ │ ├── test_annotation.py │ │ │ ├── test_init.py │ │ │ ├── test_lease.py │ │ │ └── test_life.py │ │ ├── test_python_watch_manager.py │ │ ├── test_reconcile_process_entrypoint.py │ │ ├── threads │ │ │ ├── test_heartbeat.py │ │ │ ├── test_reconcile_thread.py │ │ │ ├── test_timer_thread.py │ │ │ └── test_watch_thread.py │ │ └── utils │ │ │ └── test_pwm_util_common.py │ ├── test_dry_run_watch_manager.py │ └── test_watch_manager_base.py └── x │ ├── datastores │ ├── __init__.py │ ├── cos │ │ ├── __init__.py │ │ └── test_cos_connection.py │ ├── postgres │ │ ├── __init__.py │ │ ├── test_pg_factory.py │ │ ├── test_postgres_connection.py │ │ └── util.py │ ├── redis │ │ ├── __init__.py │ │ ├── test_redis_connection.py │ │ ├── test_redis_factory.py │ │ └── test_redis_interfaces.py │ └── test_factory_base.py │ └── utils │ ├── __init__.py │ ├── test_abc_static.py │ ├── test_common.py │ ├── test_deps_annotation.py │ ├── test_tls.py │ ├── test_tls_context.py │ └── tls_context │ ├── snapshots │ └── __init__.py │ ├── test_internal.py │ ├── test_tls_preconditions.py │ └── util.py └── tox.ini /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | omit = 3 | oper8/test_helpers/** 4 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: "" 5 | labels: "" 6 | assignees: "" 7 | --- 8 | 9 | ## Describe the bug 10 | 11 | A clear and concise description of what the bug is. 12 | 13 | ## Platform 14 | 15 | Please provide details about the environment you are using, including the following: 16 | 17 | - Interpreter version: 18 | - Library version: 19 | 20 | ## Sample Code 21 | 22 | Please include a minimal sample of the code that will (if possible) reproduce the bug in isolation 23 | 24 | ## Expected behavior 25 | 26 | A clear and concise description of what you expected to happen. 27 | 28 | ## Observed behavior 29 | 30 | What you see happening (error messages, stack traces, etc...) 31 | 32 | ## Additional context 33 | 34 | Add any other context about the problem here. 
35 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: "" 5 | labels: "" 6 | assignees: "" 7 | --- 8 | 9 | ## Is your feature request related to a problem? Please describe. 10 | 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | ## Describe the solution you'd like 14 | 15 | A clear and concise description of what you want to happen. 16 | 17 | ## Describe alternatives you've considered 18 | 19 | A clear and concise description of any alternative solutions or features you've considered. 20 | 21 | ## Additional context 22 | 23 | Add any other context about the feature request here. 24 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/user_story.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: User story 3 | about: A user-oriented story describing a piece of work to do 4 | title: "" 5 | labels: "" 6 | assignees: "" 7 | --- 8 | 9 | ## Description 10 | 11 | As a [type of user], I want to [do something], so that I can [accomplish some goal] 12 | 13 | ## Discussion 14 | 15 | Provide detailed discussion here 16 | 17 | ## Acceptance Criteria 18 | 19 | 20 | 21 | - [ ] Unit tests cover new/changed code 22 | - [ ] Examples build against new/changed code 23 | - [ ] READMEs are updated 24 | - [ ] Type of [semantic version](https://semver.org/) change is identified 25 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "pip" 4 | directory: "/" 5 | schedule: 6 | interval: "daily" 7 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | 4 | 5 | ## Related Issue 6 | Supports #ISSUE_NUMBER 7 | 8 | ## Related PRs 9 | This PR is not dependent on any other PR 10 | 11 | ## What this PR does / why we need it 12 | 13 | ## Special notes for your reviewer 14 | 15 | ## If applicable 16 | - [ ] this PR contains documentation 17 | - [ ] this PR contains unit tests 18 | - [ ] this PR has been tested for backwards compatibility 19 | 20 | ## What gif most accurately describes how I feel towards this PR? 21 | ![Example of a gif](https://media.giphy.com/media/snwvCcEKk33Hy/giphy.gif) 22 | -------------------------------------------------------------------------------- /.github/workflows/build-library.yml: -------------------------------------------------------------------------------- 1 | # Copyright The Caikit Authors 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License.
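# This workflow builds the library and runs the unit test suite with tox
# against each supported Python version in the matrix below.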
14 | 15 | name: Build and Test 16 | 17 | on: 18 | push: 19 | branches: [ "main" ] 20 | pull_request: 21 | branches: [ "main" ] 22 | 23 | jobs: 24 | build: 25 | runs-on: ubuntu-latest 26 | strategy: 27 | matrix: 28 | python-version: 29 | - setup: '3.9' 30 | tox: 'py39' 31 | - setup: '3.10' 32 | tox: 'py310' 33 | - setup: '3.11' 34 | tox: 'py311' 35 | - setup: '3.12' 36 | tox: 'py312' 37 | 38 | steps: 39 | - uses: actions/checkout@v4 40 | - name: Set up Python ${{ matrix.python-version.setup }} 41 | uses: actions/setup-python@v5 42 | with: 43 | python-version: ${{ matrix.python-version.setup }} 44 | - name: Install dependencies 45 | run: | 46 | python -m pip install --upgrade pip 47 | python -m pip install -r setup_requirements.txt 48 | - name: Build and test with tox 49 | run: tox -e ${{ matrix.python-version.tox }} -- tests 50 | -------------------------------------------------------------------------------- /.github/workflows/lint-code.yml: -------------------------------------------------------------------------------- 1 | # Copyright The Caikit Authors 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | name: Lint and Format 16 | 17 | on: 18 | push: 19 | branches: [ "main" ] 20 | pull_request: 21 | branches: [ "main" ] 22 | 23 | jobs: 24 | build: 25 | runs-on: ubuntu-latest 26 | steps: 27 | - uses: actions/checkout@v4 28 | - name: Set up Python 3.9 29 | uses: actions/setup-python@v5 30 | with: 31 | python-version: 3.9 32 | - name: Install dependencies 33 | run: | 34 | python -m pip install --upgrade pip 35 | python -m pip install -r setup_requirements.txt 36 | - name: Check Formatting 37 | run: tox -e fmt 38 | - name: Check Linting 39 | run: tox -e lint 40 | -------------------------------------------------------------------------------- /.github/workflows/publish-document.yml: -------------------------------------------------------------------------------- 1 | name: Publish document 2 | on: 3 | push: 4 | branches: 5 | - main 6 | permissions: 7 | contents: write 8 | jobs: 9 | deploy: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v4 13 | name: Checkout Repository 14 | - name: Configure Git Credentials 15 | run: | 16 | git config user.name github-actions[bot] 17 | git config user.email github-actions[bot]@users.noreply.github.com 18 | - uses: actions/setup-python@v5 19 | name: Setup Python 20 | with: 21 | python-version: 3.12 22 | - name: Set Cache ID 23 | run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV 24 | - uses: actions/cache@v4 25 | name: Restore/Save Cache 26 | with: 27 | key: mkdocs-material-${{ env.cache_id }} 28 | path: .cache 29 | restore-keys: | 30 | mkdocs-material- 31 | - name: Install mkdocs dependencies 32 | run: pip install tox 33 | - name: Deploy documentation to GitHub Pages 34 | run: tox -e docs -- gh-deploy --force 35 | -------------------------------------------------------------------------------- /.github/workflows/publish-library.yml: 
-------------------------------------------------------------------------------- 1 | # Copyright The Caikit Authors 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | name: Publish 16 | 17 | on: 18 | release: 19 | types: [published] 20 | 21 | jobs: 22 | build: 23 | runs-on: ubuntu-latest 24 | steps: 25 | - uses: actions/checkout@v4 26 | - name: Set up Python 27 | uses: actions/setup-python@v5 28 | - name: Build and check package 29 | run: | 30 | pip install tox 31 | tox -e build,twinecheck 32 | - name: Upload package 33 | if: github.event_name == 'release' 34 | uses: pypa/gh-action-pypi-publish@release/v1 35 | with: 36 | password: ${{ secrets.PYPI_TOKEN }} 37 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.egg-info 2 | *.pyc 3 | __pycache__ 4 | .coverage 5 | .coverage.* 6 | durations/* 7 | coverage*.xml 8 | dist 9 | htmlcov 10 | build 11 | 12 | # IDEs 13 | .vscode/ 14 | .idea/ 15 | 16 | # Env files 17 | .env 18 | 19 | # Virtual Env 20 | venv/ 21 | # Mac personalization files 22 | *.DS_Store 23 | 24 | # Tox envs 25 | .tox 26 | 27 | # Setuptools scm version 28 | _version.py 29 | 30 | # mkdocs documentation 31 | /site 32 | -------------------------------------------------------------------------------- /.isort.cfg: -------------------------------------------------------------------------------- 1 | [settings] 2 | profile=black 3 | from_first=true 4 | import_heading_future=Future 5 | import_heading_stdlib=Standard 6 | import_heading_thirdparty=Third Party 7 | import_heading_firstparty=First Party 8 | import_heading_localfolder=Local 9 | known_firstparty=alog,aconfig 10 | known_localfolder=oper8,tests 11 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/mirrors-prettier 3 | rev: v2.1.2 4 | hooks: 5 | - id: prettier 6 | - repo: https://github.com/psf/black 7 | rev: 22.3.0 8 | hooks: 9 | - id: black 10 | - repo: https://github.com/PyCQA/isort 11 | rev: 5.11.5 12 | hooks: 13 | - id: isort 14 | - repo: local 15 | hooks: 16 | - id: ruff-check 17 | name: ruff 18 | entry: scripts/lint.sh 19 | language: script 20 | -------------------------------------------------------------------------------- /.prettierignore: -------------------------------------------------------------------------------- 1 | # ignore jsonl files 2 | *.jsonl 3 | **/.github 4 | -------------------------------------------------------------------------------- /.prettierrc.yaml: -------------------------------------------------------------------------------- 1 | tabWidth: 2 2 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | 
##################################################### 2 | # 3 | # List of approvers for oper8 repository 4 | # 5 | ##################################################### 6 | # 7 | # Learn about CODEOWNERS file format: 8 | # https://help.github.com/en/articles/about-code-owners 9 | # 10 | 11 | * @gabe-l-hart @HonakerM 12 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Community Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, sex characteristics, gender identity and expression, 9 | level of experience, education, socio-economic status, nationality, personal 10 | appearance, race, religion, or sexual identity and orientation. 11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | - Using welcoming and inclusive language 18 | - Being respectful of differing viewpoints and experiences 19 | - Gracefully accepting constructive criticism 20 | - Focusing on what is best for the community 21 | - Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | - The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | - Trolling, insulting/derogatory comments, and personal or political attacks 28 | - Public or private harassment 29 | - Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | - Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the [project team](./CODEOWNERS). All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 
62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 72 | 73 | [homepage]: https://www.contributor-covenant.org 74 | 75 | For answers to common questions about this code of conduct, see 76 | https://www.contributor-covenant.org/faq 77 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # oper8 2 | 3 | Oper8 is a framework for writing Kubernetes operators in Python. It implements many common patterns used by large cloud applications that can be reused across a wide range of operator designs. 4 | 5 | Documentation: https://ibm.github.io/oper8/ 6 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | This security policy applies to the `oper8` Python library and source code. 4 | 5 | ## Supported Versions 6 | 7 | The Oper8 project provides community support only for the last minor version: bug fixes are released either as part of the next minor version or as an on-demand patch version. Independent of which version is next, all patch versions are cumulative, meaning that they represent the state of our `main` branch at the moment of the release. For instance, if the latest version is 0.10.0, bug fixes are released either as part of 0.11.0 or 0.10.1. 8 | 9 | Security fixes are given priority and might be enough to cause a new version to be released. 10 | 11 | ## Reporting a Vulnerability 12 | 13 | To report a security issue, please email the [CODEOWNERS](./CODEOWNERS) with a description of the issue, the steps you took to create the issue, affected versions, and if known, mitigations for the issue. 14 | 15 | Our maintainers will acknowledge receiving your email within 3 working days. We follow a 90-day disclosure timeline. 16 | -------------------------------------------------------------------------------- /docs/API References.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ::: oper8 4 | -------------------------------------------------------------------------------- /docs/architecture.drawio.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IBM/oper8/63d30d3d38147283ba9621c625aa83f4461ead22/docs/architecture.drawio.png -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | # Oper8 2 | 3 | Oper8 is a framework for writing Kubernetes operators in Python. It implements many common patterns used by large cloud applications that can be reused across a wide range of operator designs ([GitHub](https://github.com/IBM/oper8/tree/main)). 4 | 5 | For API details, please refer to the [API References](API References.md).
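Below is a minimal sketch of what an operator built on oper8 looks like. The group, kind, class names, and docstrings are purely illustrative, and the reconcile hooks are elided; see the [API References](API References.md) for the real interfaces:

```python
# Illustrative only: the @component and @controller decorators attach the
# name / group / version / kind class properties that oper8 uses when
# reconciling a custom resource.
from oper8 import Component, Controller, component, controller


@component("database")
class DatabaseComponent(Component):
    """Hypothetical component that builds the database resources."""


@controller(group="example.org", version="v1", kind="ExampleApp")
class ExampleAppController(Controller):
    """Hypothetical controller that reconciles ExampleApp resources."""
```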
6 | -------------------------------------------------------------------------------- /mkdocs.yml: -------------------------------------------------------------------------------- 1 | site_name: "oper8" 2 | nav: 3 | - Home: index.md 4 | - Architecture: architecture.md 5 | - API References: API References.md 6 | theme: 7 | name: material 8 | font: 9 | text: IBM Plex Sans 10 | code: IBM Plex Mono 11 | palette: 12 | # Dark Mode 13 | - scheme: slate 14 | toggle: 15 | icon: material/weather-sunny 16 | name: Dark mode 17 | primary: black 18 | accent: deep purple 19 | # Light Mode 20 | - scheme: default 21 | toggle: 22 | icon: material/weather-night 23 | name: Light mode 24 | primary: white 25 | accent: indigo 26 | plugins: 27 | - search 28 | - mkdocstrings: 29 | handlers: 30 | python: 31 | options: 32 | show_submodules: true 33 | -------------------------------------------------------------------------------- /oper8/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Package exports 3 | """ 4 | 5 | # Local 6 | from . import config, reconcile, status, watch_manager 7 | from .component import Component 8 | from .controller import Controller 9 | from .dag import Graph, ResourceNode 10 | from .decorator import component, controller 11 | from .deploy_manager import DeployManagerBase 12 | from .exceptions import ( 13 | assert_cluster, 14 | assert_config, 15 | assert_precondition, 16 | assert_verified, 17 | ) 18 | from .reconcile import ReconcileManager, ReconciliationResult 19 | from .session import Session 20 | from .temporary_patch.temporary_patch_controller import TemporaryPatchController 21 | from .verify_resources import verify_resource 22 | -------------------------------------------------------------------------------- /oper8/__main__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | The main module provides the executable entrypoint for oper8 4 | """ 5 | 6 | # Standard 7 | from typing import Dict, Tuple 8 | import argparse 9 | 10 | # First Party 11 | import aconfig 12 | import alog 13 | 14 | # Local 15 | from .cmd import CheckHeartbeatCmd, CmdBase, RunOperatorCmd, SetupVCSCmd 16 | from .component import config 17 | from .config import library_config 18 | from .log_format import Oper8JsonFormatter 19 | 20 | ## Constants ################################################################### 21 | 22 | log = alog.use_channel("MAIN") 23 | 24 | ## Helpers ##################################################################### 25 | 26 | 27 | def add_library_config_args(parser, config_obj=None, path=None): 28 | """Automatically add args for all elements of the library config""" 29 | path = path or [] 30 | setters = {} 31 | config_obj = config_obj or config 32 | for key, val in config_obj.items(): 33 | sub_path = path + [key] 34 | 35 | # If this is a nested arg, recurse 36 | if isinstance(val, aconfig.AttributeAccessDict): 37 | sub_setters = add_library_config_args(parser, config_obj=val, path=sub_path) 38 | for dest_name, nested_path in sub_setters.items(): 39 | setters[dest_name] = nested_path 40 | 41 | # Otherwise, add an argument explicitly 42 | else: 43 | arg_name = ".".join(sub_path) 44 | dest_name = "_".join(sub_path) 45 | kwargs = { 46 | "default": val, 47 | "dest": dest_name, 48 | "help": f"Library config override for {arg_name} (see oper8.config)", 49 | } 50 | if isinstance(val, list): 51 | kwargs["nargs"] = "*" 52 | elif isinstance(val, bool): 53 | kwargs["action"] = 
"store_true" 54 | else: 55 | type_name = None 56 | if val is not None: 57 | type_name = type(val) 58 | kwargs["type"] = type_name 59 | 60 | if ( 61 | f"--{arg_name}" 62 | not in parser._option_string_actions # pylint: disable=protected-access 63 | ): 64 | parser.add_argument(f"--{arg_name}", **kwargs) 65 | setters[dest_name] = sub_path 66 | return setters 67 | 68 | 69 | def update_library_config(args, setters): 70 | """Update the library config values based on the parsed arguments""" 71 | for dest_name, config_path in setters.items(): 72 | config_obj = library_config 73 | while len(config_path) > 1: 74 | config_obj = config_obj[config_path[0]] 75 | config_path = config_path[1:] 76 | config_obj[config_path[0]] = getattr(args, dest_name) 77 | 78 | 79 | def add_command( 80 | subparsers: argparse._SubParsersAction, 81 | cmd: CmdBase, 82 | ) -> Tuple[argparse.ArgumentParser, Dict[str, str]]: 83 | """Add the subparser and set up the default fun call""" 84 | parser = cmd.add_subparser(subparsers) 85 | parser.set_defaults(func=cmd.cmd) 86 | library_args = parser.add_argument_group("Library Configuration") 87 | library_config_setters = add_library_config_args(library_args) 88 | return parser, library_config_setters 89 | 90 | 91 | ## Main ######################################################################## 92 | 93 | 94 | def main(): 95 | """The main module provides the executable entrypoint for oper8""" 96 | parser = argparse.ArgumentParser(description=__doc__) 97 | 98 | # Add the subcommands 99 | subparsers = parser.add_subparsers(help="Available commands", dest="command") 100 | run_operator_cmd = RunOperatorCmd() 101 | run_operator_parser, library_config_setters = add_command( 102 | subparsers, run_operator_cmd 103 | ) 104 | run_health_check_cmd = CheckHeartbeatCmd() 105 | add_command(subparsers, run_health_check_cmd) 106 | setup_vcs_cmd = SetupVCSCmd() 107 | add_command(subparsers, setup_vcs_cmd) 108 | 109 | # Use a preliminary parser to check for the presence of a command and fall 110 | # back to the default command if not found 111 | check_parser = argparse.ArgumentParser(add_help=False) 112 | check_parser.add_argument("command", nargs="?") 113 | check_args, _ = check_parser.parse_known_args() 114 | if check_args.command not in subparsers.choices: 115 | args = run_operator_parser.parse_args() 116 | else: 117 | args = parser.parse_args() 118 | 119 | # Provide overrides to the library configs 120 | update_library_config(args, library_config_setters) 121 | 122 | # Reconfigure logging 123 | alog.configure( 124 | default_level=config.log_level, 125 | filters=config.log_filters, 126 | formatter=Oper8JsonFormatter() if config.log_json else "pretty", 127 | thread_id=config.log_thread_id, 128 | ) 129 | 130 | # Run the command's function 131 | args.func(args) 132 | 133 | 134 | if __name__ == "__main__": # pragma: no cover 135 | main() 136 | -------------------------------------------------------------------------------- /oper8/cmd/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module holds all of the command classes for oper8's main entrypoint 3 | """ 4 | 5 | # Local 6 | from .base import CmdBase 7 | from .check_heartbeat import CheckHeartbeatCmd 8 | from .run_operator_cmd import RunOperatorCmd 9 | from .setup_vcs_cmd import SetupVCSCmd 10 | -------------------------------------------------------------------------------- /oper8/cmd/base.py: -------------------------------------------------------------------------------- 1 | """ 2 | Base 
class for all oper8 commands 3 | """ 4 | 5 | # Standard 6 | import abc 7 | import argparse 8 | 9 | 10 | class CmdBase(abc.ABC): 11 | __doc__ = __doc__ 12 | 13 | @abc.abstractmethod 14 | def add_subparser( 15 | self, 16 | subparsers: argparse._SubParsersAction, 17 | ) -> argparse.ArgumentParser: 18 | """Add this command's argument parser subcommand 19 | 20 | Args: 21 | subparsers (argparse._SubParsersAction): The subparser section for 22 | the central main parser 23 | 24 | Returns: 25 | subparser (argparse.ArgumentParser): The configured parser for this 26 | command 27 | """ 28 | 29 | @abc.abstractmethod 30 | def cmd(self, args: argparse.Namespace): 31 | """Execute the command with the parsed arguments 32 | 33 | Args: 34 | args (argparse.Namespace): The parsed command line arguments 35 | """ 36 | -------------------------------------------------------------------------------- /oper8/cmd/check_heartbeat.py: -------------------------------------------------------------------------------- 1 | """ 2 | Command for checking the heartbeat file written by a running operator 3 | """ 4 | # Standard 5 | from datetime import datetime, timedelta 6 | from pathlib import Path 7 | import argparse 8 | 9 | # First Party 10 | import alog 11 | 12 | # Local 13 | from .. import config 14 | from ..watch_manager.python_watch_manager.threads.heartbeat import HeartbeatThread 15 | from .base import CmdBase 16 | 17 | log = alog.use_channel("MAIN") 18 | 19 | 20 | class CheckHeartbeatCmd(CmdBase): 21 | __doc__ = __doc__ 22 | 23 | ## Interface ## 24 | 25 | def add_subparser( 26 | self, 27 | subparsers: argparse._SubParsersAction, 28 | ) -> argparse.ArgumentParser: 29 | parser = subparsers.add_parser("check-heartbeat", help=__doc__) 30 | runtime_args = parser.add_argument_group("Check Heartbeat Configuration") 31 | runtime_args.add_argument( 32 | "--delta", 33 | "-d", 34 | required=True, 35 | type=int, 36 | help="Max time allowed since last check", 37 | ) 38 | runtime_args.add_argument( 39 | "--file", 40 | "-f", 41 | default=config.python_watch_manager.heartbeat_file, 42 | help="Location of health check file. 
Defaults to the configured value." 43 | ) 44 | return parser 45 | 46 | def cmd(self, args: argparse.Namespace): 47 | """Run command to validate a health check file""" 48 | 49 | # Validate args 50 | assert args.delta is not None 51 | assert args.file is not None 52 | 53 | # Ensure file exists 54 | file_path = Path(args.file) 55 | if not file_path.exists(): 56 | log.error(f"Health Check failed: {file_path} does not exist") 57 | raise FileNotFoundError() 58 | 59 | # Read and parse the most recent time from the health check file 60 | last_log_time = file_path.read_text().strip() 61 | last_time = datetime.strptime(last_log_time, HeartbeatThread._DATE_FORMAT) 62 | 63 | if last_time + timedelta(seconds=args.delta) < datetime.now(): 64 | msg = f"Health Check failed: {last_log_time} is too old" 65 | log.error(msg) 66 | raise KeyError(msg) 67 | -------------------------------------------------------------------------------- /oper8/cmd/setup_vcs_cmd.py: -------------------------------------------------------------------------------- 1 | """ 2 | CLI command for setting up a VCS version repo 3 | """ 4 | # Standard 5 | import argparse 6 | 7 | # First Party 8 | import alog 9 | 10 | # Local 11 | from ..setup_vcs import DEFAULT_DEST, DEFAULT_TAG_EXPR, setup_vcs 12 | from .base import CmdBase 13 | 14 | log = alog.use_channel("CMD-VCS") 15 | 16 | 17 | class SetupVCSCmd(CmdBase): 18 | __doc__ = __doc__ 19 | 20 | def add_subparser( 21 | self, 22 | subparsers: argparse._SubParsersAction, 23 | ) -> argparse.ArgumentParser: 24 | """Add the subparser for this command""" 25 | parser = subparsers.add_parser( 26 | "setup-vcs", 27 | help="Initialize a clean git repo to use with VCS versioning", 28 | ) 29 | command_args = parser.add_argument_group("Command Arguments") 30 | command_args.add_argument( 31 | "--source", 32 | "-s", 33 | required=True, 34 | help="Source repo to seed the clean git history", 35 | ) 36 | command_args.add_argument( 37 | "--destination", 38 | "-d", 39 | default=DEFAULT_DEST, 40 | help="Destination directory in which to place the clean git history", 41 | ) 42 | command_args.add_argument( 43 | "--branch-expr", 44 | "-b", 45 | nargs="*", 46 | default=None, 47 | help="Regular expression(s) to use to identify branches", 48 | ) 49 | command_args.add_argument( 50 | "--tag-expr", 51 | "-te", 52 | nargs="*", 53 | default=DEFAULT_TAG_EXPR, 54 | help="Regular expression(s) to use to identify tags", 55 | ) 56 | command_args.add_argument( 57 | "--force", 58 | "-f", 59 | action="store_true", 60 | default=False, 61 | help="Force overwrite existing destination", 62 | ) 63 | return parser 64 | 65 | def cmd(self, args: argparse.Namespace): 66 | setup_vcs( 67 | source=args.source, 68 | destination=args.destination, 69 | branch_expr=args.branch_expr, 70 | tag_expr=args.tag_expr, 71 | force=args.force, 72 | ) 73 | -------------------------------------------------------------------------------- /oper8/config/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Base operator config module. The config here is only used as a baseline bootup 3 | config. All application-specific config must come from the app_config. 4 | """ 5 | 6 | # Standard 7 | import sys as _sys 8 | 9 | # Local 10 | from .config import library_config 11 | 12 | 13 | # Define __getattr__ on this module to delegate to the library config. 
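# For example (illustrative): after `from oper8 import config`, an access like
# `config.dry_run` resolves to `library_config.dry_run`, while a name that is
# not a config key raises AttributeError.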
14 | def __getattr__(name): 15 | if name in library_config or hasattr({}, name): 16 | return getattr(library_config, name) 17 | raise AttributeError(f"No such config attribute {name}") 18 | 19 | 20 | # Only expose the library config keys 21 | __all__ = list(library_config.keys()) 22 | -------------------------------------------------------------------------------- /oper8/config/config.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module just loads config at import time and does the initial log config 3 | """ 4 | 5 | # Standard 6 | import os 7 | 8 | # First Party 9 | import aconfig 10 | import alog 11 | 12 | # Local 13 | from .validation import get_invalid_params 14 | 15 | # Read the library config, allowing env overrides 16 | library_config = aconfig.Config.from_yaml( 17 | os.path.join(os.path.dirname(__file__), "config.yaml"), 18 | override_env_vars=True, 19 | ) 20 | 21 | # Parse the validation file, not allowing env overrides 22 | validation_config = aconfig.Config.from_yaml( 23 | os.path.join(os.path.dirname(__file__), "config_validation.yaml"), 24 | override_env_vars=False, 25 | ) 26 | 27 | # Validate the loaded config values 28 | invalid_params = get_invalid_params(library_config, validation_config) 29 | assert ( 30 | not invalid_params 31 | ), f"Library configuration found invalid values: {invalid_params}" 32 | 33 | # Do initial alog configuration 34 | alog.configure( 35 | default_level=library_config.log_level, 36 | filters=library_config.log_filters, 37 | formatter="json" if library_config.log_json else "pretty", 38 | thread_id=library_config.log_thread_id, 39 | ) 40 | -------------------------------------------------------------------------------- /oper8/config/config_validation.yaml: -------------------------------------------------------------------------------- 1 | log_level: 2 | type: enum 3 | values: 4 | [ 5 | "disable", 6 | "off", 7 | "error", 8 | "warning", 9 | "info", 10 | "trace", 11 | "debug", 12 | "debug1", 13 | "debug2", 14 | "debug3", 15 | "debug4", 16 | ] 17 | log_filters: 18 | type: str 19 | log_json: 20 | type: bool 21 | log_thread_id: 22 | type: bool 23 | 24 | dry_run: 25 | type: bool 26 | working_dir: 27 | type: str 28 | optional: true 29 | 30 | standalone: 31 | type: bool 32 | 33 | strict_versioning: 34 | type: bool 35 | 36 | supported_versions: 37 | type: list 38 | item_type: str 39 | 40 | deploy_retries: 41 | type: int 42 | min: 0 43 | 44 | requeue_after_seconds: 45 | type: int 46 | min: 0 47 | 48 | rollout_manager: 49 | deploy_threads: 50 | type: int 51 | optional: true 52 | min: 1 53 | verify_threads: 54 | type: int 55 | optional: true 56 | min: 1 57 | 58 | ansible_watch_manager: 59 | log_file_dir: 60 | type: str 61 | optional: true 62 | kill_max_wait: 63 | type: number 64 | min: 0 65 | 66 | python_watch_manager: 67 | max_concurrent_reconciles: 68 | type: int 69 | optional: true 70 | reconcile_period: 71 | type: str 72 | 73 | heartbeat_file: 74 | type: str 75 | optional: true 76 | heartbeat_period: 77 | type: str 78 | 79 | process_context: 80 | type: str 81 | watch_dependent_resources: 82 | type: bool 83 | filter: 84 | type: str 85 | 86 | lock: 87 | type: 88 | type: str 89 | 90 | name: 91 | type: str 92 | optional: true 93 | 94 | namespace: 95 | type: str 96 | 97 | pod_name: 98 | type: str 99 | optional: true 100 | 101 | poll_time: 102 | type: str 103 | -------------------------------------------------------------------------------- /oper8/constants.py: 
-------------------------------------------------------------------------------- 1 | """ 2 | Shared module to hold constant values for the library 3 | """ 4 | 5 | # Reconciliation configuration annotations 6 | PAUSE_ANNOTATION_NAME = "oper8.org/pause-execution" 7 | CONFIG_DEFAULTS_ANNOTATION_NAME = "oper8.org/config-defaults" 8 | 9 | # Leadership annotations 10 | LEASE_NAME_ANNOTATION_NAME = "oper8.org/lease-name" 11 | LEASE_TIME_ANNOTATION_NAME = "oper8.org/lease-time" 12 | 13 | # Log config annotations 14 | LOG_DEFAULT_LEVEL_NAME = "oper8.org/log-default-level" 15 | LOG_FILTERS_NAME = "oper8.org/log-filters" 16 | LOG_THREAD_ID_NAME = "oper8.org/log-thread-id" 17 | LOG_JSON_NAME = "oper8.org/log-json" 18 | 19 | # List to keep track of all oper8-managed annotations which should be passed 20 | # from a parent Application to a child CR. 21 | # NOTE: The only excluded annotations are 22 | # * temporary patches as this is managed by the oper8_temporary_patch module 23 | # directly 24 | # * leadership annotations since those may differ per CR instance 25 | PASSTHROUGH_ANNOTATIONS = [ 26 | CONFIG_DEFAULTS_ANNOTATION_NAME, 27 | LOG_DEFAULT_LEVEL_NAME, 28 | LOG_FILTERS_NAME, 29 | LOG_JSON_NAME, 30 | LOG_THREAD_ID_NAME, 31 | PAUSE_ANNOTATION_NAME, 32 | ] 33 | 34 | # BACKWARDS COMPATIBILITY: We maintain the ALL_ANNOTATIONS name for 35 | # compatibility with old code that accessed this directly 36 | ALL_ANNOTATIONS = PASSTHROUGH_ANNOTATIONS 37 | 38 | # The name of the annotation used to attach TemporaryPatch resources to a given 39 | # oper8-managed CR 40 | TEMPORARY_PATCHES_ANNOTATION_NAME = "oper8.org/temporary-patches" 41 | 42 | # The name of the annotation used to indicate the internal name of each 43 | # oper8-managed resource 44 | INTERNAL_NAME_ANNOTATION_NAME = "oper8.org/internal-name" 45 | # Keeping the misspelled variable for backward compatibility: https://github.com/IBM/oper8/pull/133#discussion_r1820696481 46 | INTERNAL_NAME_ANOTATION_NAME = INTERNAL_NAME_ANNOTATION_NAME 47 | 48 | # Default namespace if none given 49 | DEFAULT_NAMESPACE = "default" 50 | 51 | # Delimiter used for nested dict keys 52 | NESTED_DICT_DELIM = "." 
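# For example (illustrative): an override key like "spec.size" splits on this
# delimiter into the nested path ["spec", "size"].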
53 | 54 | # Name of the spec section used to provide config overrides 55 | CONFIG_OVERRIDES = "configOverrides" 56 | -------------------------------------------------------------------------------- /oper8/dag/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Package exports 3 | """ 4 | 5 | # Local 6 | from .completion_state import CompletionState 7 | from .graph import Graph 8 | from .node import Node, ResourceNode 9 | from .runner import DagHaltError, Runner 10 | -------------------------------------------------------------------------------- /oper8/dag/completion_state.py: -------------------------------------------------------------------------------- 1 | """ 2 | CompletionState holds info about how a DAG Runner completes 3 | """ 4 | 5 | 6 | # Standard 7 | from typing import List, Optional 8 | 9 | # First Party 10 | import alog 11 | 12 | # Local 13 | from .node import Node 14 | 15 | log = alog.use_channel("DAG") 16 | 17 | ## Completion state ############################################################## 18 | 19 | 20 | class CompletionState: 21 | """ 22 | This class holds the definition of a CompletionState which manages all 23 | the information about how the nodes in a rollout Runner terminated 24 | """ 25 | 26 | def __init__( # pylint: disable=too-many-arguments 27 | self, 28 | verified_nodes: Optional[List[Node]] = None, 29 | unverified_nodes: Optional[List[Node]] = None, 30 | failed_nodes: Optional[List[Node]] = None, 31 | unstarted_nodes: Optional[List[Node]] = None, 32 | exception: Optional[Exception] = None, 33 | ): 34 | """Construct with each node set""" 35 | self.verified_nodes = set(verified_nodes or []) 36 | self.unverified_nodes = set(unverified_nodes or []) 37 | self.failed_nodes = set(failed_nodes or []) 38 | self.unstarted_nodes = set(unstarted_nodes or []) 39 | self.all_nodes = ( 40 | self.verified_nodes.union(self.unverified_nodes) 41 | .union(self.failed_nodes) 42 | .union(self.unstarted_nodes) 43 | ) 44 | self.exception = exception 45 | 46 | # Make sure the sets are not overlapping 47 | sets = [ 48 | self.verified_nodes, 49 | self.unverified_nodes, 50 | self.failed_nodes, 51 | self.unstarted_nodes, 52 | ] 53 | for i, node_set_a in enumerate(sets): 54 | for j, node_set_b in enumerate(sets): 55 | if i != j: 56 | assert not node_set_a.intersection(node_set_b), ( 57 | "Programming Error: " 58 | + f"CompletionState constructed with overlapping sets: {str(self)}" 59 | ) 60 | 61 | def __str__(self): 62 | return "\n".join( 63 | [ 64 | f"[NODES] {key}: {list(sorted(nodes))}" 65 | for key, nodes in [ 66 | ("Verified", [node.get_name() for node in self.verified_nodes]), 67 | ("Unverified", [node.get_name() for node in self.unverified_nodes]), 68 | ("Failed", [node.get_name() for node in self.failed_nodes]), 69 | ("Unstarted", [node.get_name() for node in self.unstarted_nodes]), 70 | ] 71 | ] 72 | + [ 73 | f"Exception: {self.exception}", 74 | ] 75 | ) 76 | 77 | def __eq__(self, other: "CompletionState"): 78 | return ( 79 | self.verified_nodes == other.verified_nodes 80 | and self.unverified_nodes == other.unverified_nodes 81 | and self.failed_nodes == other.failed_nodes 82 | and self.unstarted_nodes == other.unstarted_nodes 83 | ) 84 | 85 | def deploy_completed(self) -> bool: 86 | """Determine if the dag completed all nodes through to the deploy 87 | step 88 | 89 | NOTE: An empty node set is considered completed 90 | 91 | Returns: 92 | completed: bool 93 | True if there are no failed nodes and no unstarted nodes 94 | """ 95 | 
return not self.failed_nodes and not self.unstarted_nodes 96 | 97 | def verify_completed(self) -> bool: 98 | """Determine if the dag completed all nodes through to the verification 99 | step 100 | 101 | NOTE: An empty node set is considered verified 102 | 103 | Returns: 104 | completed: bool 105 | True if there are no nodes found outside of the verified_nodes 106 | and there is no exception in the termination state 107 | """ 108 | return ( 109 | not self.unverified_nodes 110 | and not self.failed_nodes 111 | and not self.unstarted_nodes 112 | and not self.exception 113 | ) 114 | 115 | def failed(self) -> bool: 116 | """Determine if any of the nodes failed 117 | 118 | Returns: 119 | failed: bool 120 | True if there are any nodes in the failed state or there is a 121 | fatal error 122 | """ 123 | return bool(self.failed_nodes) or self._fatal_exception() 124 | 125 | def _fatal_exception(self): 126 | """Helper to determine if there is a fatal exception in the state""" 127 | return self.exception is not None and getattr( 128 | self.exception, "is_fatal_error", True 129 | ) 130 | -------------------------------------------------------------------------------- /oper8/dag/graph.py: -------------------------------------------------------------------------------- 1 | """ 2 | Graph holds information about a Directed Acyclic Graph 3 | """ 4 | 5 | 6 | # Standard 7 | from typing import Callable, List, Optional 8 | 9 | # First Party 10 | import alog 11 | 12 | # Local 13 | from .node import Node 14 | 15 | log = alog.use_channel("DAG") 16 | 17 | ## Graph Class ############################################################## 18 | 19 | 20 | class Graph: 21 | """Class for representing an instance of a Graph. Handles adding and removing nodes 22 | as well as graph functions like flattening""" 23 | 24 | def __init__(self) -> None: 25 | self.__node_dict = {} 26 | 27 | # Add the root node of the Graph. Every member of this graph is also a child 28 | # of the root node 29 | self.__root_node = Node() 30 | self.__node_dict[self.__root_node.get_name()] = self.__root_node 31 | 32 | ## Properties ############################################################## 33 | 34 | @property 35 | def root(self) -> Node: # pylint: disable=invalid-name 36 | """The root node of the Graph""" 37 | return self.__root_node 38 | 39 | @property 40 | def node_dict(self) -> dict: 41 | """Dictionary of all node names and their nodes""" 42 | return self.__node_dict 43 | 44 | ## Modifiers ############################################################## 45 | 46 | def add_node(self, node: Node): 47 | """Add node to graph 48 | Args: 49 | node: Node 50 | The node to be added to the Dag. 51 | """ 52 | if not node.get_name(): 53 | raise ValueError("None is reserved for the root node of the dag Graph") 54 | 55 | if node.get_name() in self.node_dict: 56 | raise ValueError( 57 | f"Only one node with id {node.get_name()} can be added to a Graph" 58 | ) 59 | 60 | self.node_dict[node.get_name()] = node 61 | self.root.add_child(node) 62 | 63 | def add_node_dependency( 64 | self, parent_node: Node, child_node: Node, edge_fn: Optional[Callable] = None 65 | ): 66 | """Add dependency or "edge" to graph between two nodes. 
This is the same 67 | as doing parent_node.add_dependency(child_node) 68 | Args: 69 | parent_node: Node 70 | The parent or dependent node aka the node that must wait 71 | child_node: Node 72 | The child or dependency node aka the node that must be deployed first 73 | edge_fn: Optional[Callable] Optional function to attach to the edge between the two nodes 74 | """ 75 | if not self.get_node(parent_node.get_name()): 76 | raise ValueError(f"Parent node {parent_node} is not present in Graph") 77 | 78 | if not self.get_node(child_node.get_name()): 79 | raise ValueError(f"Child node {child_node} is not present in Graph") 80 | 81 | # Make sure edits are applied to the nodes already present in the graph 82 | parent_node = self.get_node(parent_node.get_name()) 83 | child_node = self.get_node(child_node.get_name()) 84 | 85 | parent_node.add_child(child_node, edge_fn) 86 | 87 | ## Accessors ############################################################## 88 | 89 | def get_node(self, name: str): # pylint: disable=invalid-name 90 | """Get the node with name""" 91 | return self.node_dict.get(name) 92 | 93 | def get_all_nodes(self): 94 | """Get list of all nodes""" 95 | return [node for node, _ in self.root.get_children()] 96 | 97 | def has_node(self, node: Node): # pylint: disable=invalid-name 98 | """Check if node is in graph""" 99 | return self.root.has_child(node) 100 | 101 | def empty(self): 102 | """Check if a graph is empty""" 103 | return len(self.root.get_children()) == 0 104 | 105 | ## Graph Functions ############################################################## 106 | 107 | def topology(self) -> List["Node"]: 108 | """Get a list of nodes in deployment order""" 109 | topology = self.root.topology() 110 | topology.remove(self.root) 111 | return topology 112 | 113 | ## Internal Functions ############################################################## 114 | 115 | def __repr__(self): 116 | str_list = [] 117 | for child, _ in self.root.get_children(): 118 | child_str_list = [node.get_name() for node, _ in child.get_children()] 119 | str_list.append(f"{child.get_name()}:[{','.join(child_str_list)}]") 120 | 121 | return f"Graph({{{','.join(str_list)}}})" 122 | 123 | def __contains__(self, item: Node): 124 | return self.has_node(item) 125 | 126 | def __iter__(self): 127 | """Iterate over all child nodes""" 128 | return self.get_all_nodes().__iter__() 129 | -------------------------------------------------------------------------------- /oper8/decorator.py: -------------------------------------------------------------------------------- 1 | """ 2 | Decorator for making the authoring of "pure" components easier 3 | """ 4 | 5 | # Standard 6 | from typing import Callable, Dict, Optional, Type 7 | 8 | # Local 9 | from .component import Component 10 | from .controller import Controller 11 | 12 | 13 | def component(name: str) -> Callable[[Type], Type]: 14 | """The @component decorator is the primary entrypoint for creating an 15 | oper8.Component. It ensures the wrapped type's interface matches the expected 16 | Component interface, including the "name" class attribute. 
17 | 18 | Args: 19 | name: str 20 | The name string will be set as the class property for the wrapped 21 | class 22 | 23 | Returns: 24 | decorator: Callable[[Type[Component]], Type[Component]] 25 | The decorator function that will be invoked on construction of 26 | decorated classes 27 | """ 28 | 29 | def decorator(cls: Type[Component]) -> Type[Component]: 30 | cls.name = name 31 | return cls 32 | 33 | return decorator 34 | 35 | 36 | def controller( # pylint: disable=too-many-arguments 37 | group: str, 38 | version: str, 39 | kind: str, 40 | finalizer: str = None, 41 | extra_properties: Optional[Dict[str, any]] = None, 42 | ) -> Callable[[Type[Controller]], Type[Controller]]: 43 | """The @controller decorator is the primary entrypoint for creating an 44 | oper8.Controller. It ensures the wrapped type's interface matches the 45 | required Controller interface, including class properties. 46 | 47 | NOTE: The `extra_properties` argument is an entrypoint for loosely coupled 48 | Controller-specific configuration that is tied to the specific 49 | WatchManager implementation being used. The current list of useful 50 | properties is: 51 | 52 | * disable_vcs: This can be used to tell the AnsibleWatchManager 53 | that the Controller will not use ansible-vcs, even if other 54 | Controllers managed by the same operator do. 55 | * pwm_filters: This can be used to tell the PythonWatchManager of any 56 | additional watch filters. If the value is a list then the filters are added 57 | to all watches including dependent watches. If the value is a dict then 58 | it expects the keys to be the resource global id with the values being a list 59 | of filters for that resource 60 | * pwm_subsystems: This can be used to tell the PythonWatchManager of any 61 | subsystem relations. This allows a "subsystem" controller to be run during 62 | the reconciliation of another, similar to the DryRunWatchManager 63 | 64 | Args: 65 | group: str 66 | The apiVersion group for the resource this controller manages 67 | version: str 68 | The apiVersion version for the resource this controller manages 69 | kind: str 70 | The kind for the resource this controller manages 71 | extra_properties: Optional[Dict[str, any]] 72 | Extra properties that should be defined as class-properties for this 73 | controller 74 | 75 | Returns: 76 | decorator: Callable[[Type[Controller]], Type[Controller]] 77 | The decorator function that will be invoked on construction of 78 | decorated classes 79 | """ 80 | 81 | def decorator(cls: Type[Controller]) -> Type[Controller]: 82 | cls.group = group 83 | cls.version = version 84 | cls.kind = kind 85 | for key, val in (extra_properties or {}).items(): 86 | setattr(cls, key, val) 87 | if finalizer is not None: 88 | cls.finalizer = finalizer 89 | return cls 90 | 91 | return decorator 92 | -------------------------------------------------------------------------------- /oper8/deploy_manager/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | The DeployManager is the abstraction in charge of interacting with the 3 | kubernetes cluster to deploy, look up, and delete resources. 
4 | """ 5 | 6 | # Local 7 | from .base import DeployManagerBase, DeployMethod 8 | from .dry_run_deploy_manager import DryRunDeployManager 9 | from .kube_event import KubeEventType, KubeWatchEvent 10 | from .openshift_deploy_manager import OpenshiftDeployManager 11 | -------------------------------------------------------------------------------- /oper8/deploy_manager/kube_event.py: -------------------------------------------------------------------------------- 1 | """ 2 | Helper module to define shared types related to Kube Events 3 | """ 4 | 5 | # Standard 6 | from dataclasses import dataclass, field 7 | from datetime import datetime 8 | from enum import Enum 9 | 10 | # First Party 11 | import alog 12 | 13 | # Local 14 | from ..managed_object import ManagedObject 15 | 16 | log = alog.use_channel("KUBEWATCH") 17 | 18 | 19 | class KubeEventType(Enum): 20 | """Enum for all possible kubernetes event types""" 21 | 22 | DELETED = "DELETED" 23 | MODIFIED = "MODIFIED" 24 | ADDED = "ADDED" 25 | 26 | 27 | @dataclass 28 | class KubeWatchEvent: 29 | """DataClass containing the type, resource, and timestamp of a 30 | particular event""" 31 | 32 | type: KubeEventType 33 | resource: ManagedObject 34 | timestamp: datetime = field(default_factory=datetime.now) 35 | -------------------------------------------------------------------------------- /oper8/deploy_manager/owner_references.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module holds common functionality that the DeployManager implementations 3 | can use to manage ownerReferences on deployed resources 4 | """ 5 | 6 | # First Party 7 | import alog 8 | 9 | # Local 10 | from ..exceptions import assert_cluster 11 | from .base import DeployManagerBase 12 | 13 | log = alog.use_channel("OWNRF") 14 | 15 | 16 | def update_owner_references( 17 | deploy_manager: DeployManagerBase, 18 | owner_cr: dict, 19 | child_obj: dict, 20 | ): 21 | """Fetch current ownerReferences and merge a reference for this CR into 22 | the child object 23 | """ 24 | 25 | # Validate the shape of the owner CR and the chid object 26 | _validate_object_struct(owner_cr) 27 | _validate_object_struct(child_obj) 28 | 29 | # Fetch the current state of this object 30 | kind = child_obj["kind"] 31 | api_version = child_obj["apiVersion"] 32 | name = child_obj["metadata"]["name"] 33 | namespace = child_obj["metadata"]["namespace"] 34 | uid = child_obj["metadata"].get("uid") 35 | 36 | success, content = deploy_manager.get_object_current_state( 37 | kind=kind, name=name, api_version=api_version, namespace=namespace 38 | ) 39 | assert_cluster( 40 | success, f"Failed to fetch current state of {api_version}.{kind}/{name}" 41 | ) 42 | 43 | # Get the current ownerReferences 44 | owner_refs = [] 45 | if content is not None: 46 | owner_refs = content.get("metadata", {}).get("ownerReferences", []) 47 | log.debug3("Current owner refs: %s", owner_refs) 48 | 49 | # If the current CR is not represented and current CR is in the same 50 | # namespace as the child object, add it 51 | current_uid = owner_cr["metadata"]["uid"] 52 | log.debug3("Current CR UID: %s", current_uid) 53 | current_namespace = owner_cr["metadata"]["namespace"] 54 | log.debug3("Current CR namespace: %s", current_namespace) 55 | 56 | if current_uid == uid: 57 | log.debug2("Owner is same as child; Not adding owner ref") 58 | return 59 | 60 | if (namespace == current_namespace) and ( 61 | current_uid not in [ref["uid"] for ref in owner_refs] 62 | ): 63 | log.debug2( 64 | "Adding current 
CR owner reference for %s.%s/%s", 65 | api_version, 66 | kind, 67 | name, 68 | ) 69 | owner_refs.append(_make_owner_reference(owner_cr)) 70 | 71 | # Add the ownerReferences to the object that will be applied to the 72 | # cluster 73 | log.debug4("Final owner refs: %s", owner_refs) 74 | child_obj["metadata"]["ownerReferences"] = owner_refs 75 | 76 | 77 | ## Implementation Details ###################################################### 78 | 79 | 80 | def _validate_object_struct(obj: dict): 81 | """Ensure that the required portions of an object are present (kind, 82 | apiVerison, metadata.namespace, metadata.name) 83 | """ 84 | assert "kind" in obj, "Got object without 'kind'" 85 | assert "apiVersion" in obj, "Got object without 'apiVersion'" 86 | metadata = obj.get("metadata") 87 | assert isinstance(metadata, dict), "Got object with non-dict 'metadata'" 88 | assert "name" in metadata, "Got object without 'metadata.name'" 89 | assert "namespace" in metadata, "Got object without 'metadata.namespace'" 90 | 91 | 92 | def _make_owner_reference(owner_cr: dict) -> dict: 93 | """Make an owner reference for the given CR instance 94 | 95 | Error Semantics: This function makes a best-effort and does not validate the 96 | content of the owner_cr, so the resulting ownerReference may contain None 97 | entries. 98 | 99 | Args: 100 | owner_cr: dict 101 | The full CR manifest for the owning resource 102 | 103 | Returns: 104 | owner_reference: dict 105 | The dict entry for the `metadata.ownerReferences` entry of the owned 106 | object 107 | """ 108 | # NOTE: We explicitly don't set controller: True here. If two 109 | # oper8-managed resources reference the resource, only one can have 110 | # controller set to True. According to StackOverflow, this field is 111 | # only used for adoption and not garbage collection. 112 | # CITE: https://stackoverflow.com/a/65825463 113 | metadata = owner_cr.get("metadata", {}) 114 | return { 115 | "apiVersion": owner_cr.get("apiVersion"), 116 | "kind": owner_cr.get("kind"), 117 | "name": metadata.get("name"), 118 | "uid": metadata.get("uid"), 119 | # The parent will not be deleted until this object completes its 120 | # deletion 121 | "blockOwnerDeletion": True, 122 | } 123 | -------------------------------------------------------------------------------- /oper8/exceptions.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module implements custom exceptions 3 | """ 4 | 5 | ## Base Error ################################################################## 6 | 7 | 8 | class Oper8Error(Exception): 9 | """Base class for all oper8 exceptions""" 10 | 11 | def __init__(self, message: str, is_fatal_error: bool): 12 | """Construct with a flag indicating whether this is a fatal error. This 13 | will be a static property of all children. 14 | """ 15 | super().__init__(message) 16 | self._is_fatal_error = is_fatal_error 17 | 18 | @property 19 | def is_fatal_error(self): 20 | """Property indicating whether or not this error should signal a fatal 21 | state in the rollout 22 | """ 23 | return self._is_fatal_error 24 | 25 | 26 | ## Fatal Errors ################################################################ 27 | 28 | 29 | class Oper8FatalError(Oper8Error): 30 | """An Oper8FatalError is one that indicates an unexpected, and likely 31 | unrecoverable, failure during a reconciliation. 
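
The `is_fatal_error` flag is what reconciliation logic keys off of. A rough
sketch of the intended dispatch (the handler names here are illustrative, not
part of the library API):

    try:
        run_reconcile()
    except Oper8Error as err:
        if err.is_fatal_error:
            mark_reconcile_failed()  # fatal: surface the failure
        else:
            requeue_reconcile()  # expected: retry on a later reconciliation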
32 | """ 33 | 34 | def __init__(self, message: str = ""): 35 | super().__init__(message=message, is_fatal_error=True) 36 | 37 | 38 | class RolloutError(Oper8FatalError): 39 | """Exception indicating a failure during application rollout""" 40 | 41 | def __init__(self, message: str = "", completion_state=None): 42 | self.completion_state = completion_state 43 | super().__init__(message) 44 | 45 | 46 | class ConfigError(Oper8FatalError): 47 | """Exception caused during usage of user-provided configuration""" 48 | 49 | 50 | class ClusterError(Oper8FatalError): 51 | """Exception caused during chart construction when a cluster operation fails 52 | in an unexpected way. 53 | """ 54 | 55 | 56 | ## Expected Errors ############################################################# 57 | 58 | 59 | class Oper8ExpectedError(Oper8Error): 60 | """An Oper8ExpectedError is one that indicates an expected failure condition 61 | that should cause a reconciliation to terminate, but is expected to resolve 62 | in a subsequent reconciliation. 63 | """ 64 | 65 | def __init__(self, message: str = ""): 66 | super().__init__(message=message, is_fatal_error=False) 67 | 68 | 69 | class PreconditionError(Oper8ExpectedError): 70 | """Exception caused during chart construction when an expected precondition 71 | is not met. 72 | """ 73 | 74 | 75 | class VerificationError(Oper8ExpectedError): 76 | """Exception caused during resource verification when a desired verification 77 | state is not reached. 78 | """ 79 | 80 | 81 | ## Assertions ################################################################## 82 | 83 | 84 | def assert_precondition(condition: bool, message: str = ""): 85 | """Replacement for assert() which will throw a PreconditionError. This 86 | should be used when building a chart which requires that a precondition is 87 | met before continuing. 88 | """ 89 | if not condition: 90 | raise PreconditionError(message) 91 | 92 | 93 | def assert_verified(condition: bool, message: str = ""): 94 | """Replacement for assert() which will throw a VerificationError. This 95 | should be used when verifying the state of a resource in the cluster. 96 | """ 97 | if not condition: 98 | raise VerificationError(message) 99 | 100 | 101 | def assert_config(condition: bool, message: str = ""): 102 | """Replacement for assert() which will throw a ConfigError. This should be 103 | used when building a chart which requires that certain conditions be true in 104 | the deploy_config or app_config. 105 | """ 106 | if not condition: 107 | raise ConfigError(message) 108 | 109 | 110 | def assert_cluster(condition: bool, message: str = ""): 111 | """Replacement for assert() which will throw a ClusterError. This should 112 | be used when building a chart which requires that an operation in the 113 | cluster (such as fetching an existing secret) succeeds. 
114 | """ 115 | if not condition: 116 | raise ClusterError(message) 117 | 118 | 119 | ## Compatibility Exceptions ################################################################## 120 | class Oper8DeprecationWarning(DeprecationWarning): 121 | """This warning is issued for deprecated APIs""" 122 | 123 | 124 | class Oper8PendingDeprecationWarning(PendingDeprecationWarning): 125 | """This warning is issued for APIs that are still supported but will be removed eventually""" 126 | -------------------------------------------------------------------------------- /oper8/log_format.py: -------------------------------------------------------------------------------- 1 | """ 2 | Custom logging formats that contain more detailed oper8 logs 3 | """ 4 | 5 | # First Party 6 | from alog import AlogJsonFormatter 7 | import alog 8 | 9 | log = alog.use_channel("CTRLR") 10 | 11 | 12 | class Oper8JsonFormatter(AlogJsonFormatter): 13 | """Custom Log Format that extends AlogJsonFormatter to add multiple 14 | oper8 specific fields to the json. This includes things like identifiers 15 | of the resource being reconciled, reconciliationId, and thread information 16 | """ 17 | 18 | _FIELDS_TO_PRINT = AlogJsonFormatter._FIELDS_TO_PRINT + [ 19 | "process", 20 | "thread", 21 | "threadName", 22 | "kind", 23 | "apiVersion", 24 | "resourceVersion", 25 | "resourceName", 26 | "reconciliationId", 27 | ] 28 | 29 | def __init__(self, manifest=None, reconciliation_id=None): 30 | super().__init__() 31 | self.manifest = manifest 32 | self.reconciliation_id = reconciliation_id 33 | 34 | def format(self, record): 35 | if self.reconciliation_id: 36 | record.reconciliationId = self.reconciliation_id 37 | 38 | if resource := getattr(record, "resource", self.manifest): 39 | record.kind = resource.get("kind") 40 | record.apiVersion = resource.get("apiVersion") 41 | 42 | metadata = resource.get("metadata", {}) 43 | record.resourceVersion = metadata.get("resourceVersion") 44 | record.resourceName = metadata.get("name") 45 | 46 | return super().format(record) 47 | -------------------------------------------------------------------------------- /oper8/managed_object.py: -------------------------------------------------------------------------------- 1 | """ 2 | Helper object to represent a kubernetes object that is managed by the operator 3 | """ 4 | # Standard 5 | from typing import Callable, Optional 6 | import uuid 7 | 8 | KUBE_LIST_IDENTIFIER = "List" 9 | 10 | 11 | class ManagedObject: # pylint: disable=too-many-instance-attributes 12 | """Basic struct to represent a managed kubernetes object""" 13 | 14 | def __init__( 15 | self, 16 | definition: dict, 17 | verify_function: Optional[Callable] = None, 18 | deploy_method: Optional["DeployMethod"] = None, # noqa: F821 19 | ): 20 | self.kind = definition.get("kind") 21 | self.metadata = definition.get("metadata", {}) 22 | self.name = self.metadata.get("name") 23 | self.namespace = self.metadata.get("namespace") 24 | self.uid = self.metadata.get("uid", uuid.uuid4()) 25 | self.resource_version = self.metadata.get("resourceVersion") 26 | self.api_version = definition.get("apiVersion") 27 | self.definition = definition 28 | self.verify_function = verify_function 29 | self.deploy_method = deploy_method 30 | 31 | # If resource is not list then check name 32 | if KUBE_LIST_IDENTIFIER not in self.kind: 33 | assert self.name is not None, "No name found" 34 | 35 | assert self.kind is not None, "No kind found" 36 | assert self.api_version is not None, "No apiVersion found" 37 | 38 | def get(self, 
*args, **kwargs): 39 | """Pass get calls to the objects definition""" 40 | return self.definition.get(*args, **kwargs) 41 | 42 | def __str__(self): 43 | return f"{self.api_version}/{self.kind}/{self.name}" 44 | 45 | def __repr__(self): 46 | return str(self) 47 | 48 | def __hash__(self): 49 | """Hash explicitly excludes the definition so that the object's 50 | identifier in a map can be based only on the unique identifier of the 51 | resource in the cluster. If the original resource did not provide a unique 52 | identifier then use the apiVersion, kind, and name 53 | """ 54 | return hash(self.metadata.get("uid", str(self))) 55 | 56 | def __eq__(self, other): 57 | return hash(self) == hash(other) 58 | -------------------------------------------------------------------------------- /oper8/patch.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module holds shared semantics for patching resources using temporary_patch 3 | """ 4 | 5 | # Standard 6 | from typing import List 7 | import copy 8 | 9 | # Third Party 10 | from jsonpatch import JsonPatch 11 | 12 | # First Party 13 | import alog 14 | 15 | # Local 16 | from .patch_strategic_merge import patch_strategic_merge 17 | 18 | log = alog.use_channel("PATCH") 19 | 20 | ## Public Interface ############################################################ 21 | 22 | STRATEGIC_MERGE_PATCH = "patchStrategicMerge" 23 | JSON_PATCH_6902 = "patchJson6902" 24 | 25 | 26 | def apply_patches( 27 | internal_name: str, 28 | resource_definition: dict, 29 | temporary_patches: List[dict], 30 | ): 31 | """Apply all temporary patches to the given resource from the given list. 32 | The patches are applied in-place. 33 | 34 | Args: 35 | internal_name: str 36 | The name given to the internal node of the object. This is used to 37 | identify which patches apply to this object. 
38 | resource_definition: dict 39 | The dict representation of the object to patch 40 | temporary_patches: List[dict] 41 | The list of temporary patches that apply to this rollout 42 | 43 | Returns: 44 | patched_definition: dict 45 | The dict representation of the object with patches applied 46 | """ 47 | log.debug2( 48 | "Looking for patches for %s/%s (%s)", 49 | resource_definition.get("kind"), 50 | resource_definition.get("metadata", {}).get("name"), 51 | internal_name, 52 | ) 53 | resource_definition = copy.deepcopy(resource_definition) 54 | for patch_content in temporary_patches: 55 | log.debug4("Checking patch: << %s >>", patch_content) 56 | 57 | # Look to see if this patch contains a match for the internal name 58 | internal_name_parts = internal_name.split(".") 59 | internal_name_parts.reverse() 60 | patch = patch_content.spec.patch 61 | log.debug4("Full patch section: %s", patch) 62 | while internal_name_parts and isinstance(patch, dict): 63 | patch_level = internal_name_parts.pop() 64 | log.debug4("Getting patch level [%s]", patch_level) 65 | patch = patch.get(patch_level, {}) 66 | log.debug4("Patch level: %s", patch) 67 | log.debug4("Checking patch: %s", patch) 68 | 69 | # If the patch matches, apply the right merge 70 | if patch and not internal_name_parts: 71 | log.debug3("Found matching patch: %s", patch_content.metadata.name) 72 | 73 | # Dispatch the right patch type 74 | if patch_content.spec.patchType == STRATEGIC_MERGE_PATCH: 75 | resource_definition = _apply_patch_strategic_merge( 76 | resource_definition, patch 77 | ) 78 | elif patch_content.spec.patchType == JSON_PATCH_6902: 79 | resource_definition = _apply_json_patch(resource_definition, patch) 80 | else: 81 | raise ValueError( 82 | f"Unsupported patch type [{patch_content.spec.patchType}]" 83 | ) 84 | return resource_definition 85 | 86 | 87 | ## JSON Patch 6902 ############################################################# 88 | 89 | 90 | def _apply_json_patch( 91 | resource_definition: dict, 92 | patch: dict, 93 | ) -> dict: 94 | """Apply a Json Patch based on JSON Patch (rfc 6902)""" 95 | 96 | if not isinstance(patch, list): 97 | raise ValueError("Invalid JSON 6902 patch. 
Must be a list of operations.") 98 | return JsonPatch(patch).apply(resource_definition) 99 | 100 | 101 | ## Strategic Merge Patch ####################################################### 102 | 103 | 104 | def _apply_patch_strategic_merge( 105 | resource_definition: dict, 106 | patch: dict, 107 | ) -> dict: 108 | """Apply a Strategic Merge Patch based on JSON Merge Patch (rfc 7386)""" 109 | return patch_strategic_merge(resource_definition, patch) 110 | -------------------------------------------------------------------------------- /oper8/test_helpers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IBM/oper8/63d30d3d38147283ba9621c625aa83f4461ead22/oper8/test_helpers/__init__.py -------------------------------------------------------------------------------- /oper8/test_helpers/data/controller.template: -------------------------------------------------------------------------------- 1 | # Local 2 | from oper8.test_helpers.helpers import DummyController 3 | 4 | 5 | class {controller_name}(DummyController): 6 | def __init__(self, *args, **kwargs): 7 | super().__init__( 8 | *args, 9 | components={components}, 10 | **kwargs 11 | ) 12 | -------------------------------------------------------------------------------- /oper8/test_helpers/data/test_ca.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDUzCCAjugAwIBAgIUKB2+ebfH3LFuYBv9J7SI1X45028wDQYJKoZIhvcNAQEL 3 | BQAwUTELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMQ8wDQYDVQQHDAZB 4 | cm1vbmsxDDAKBgNVBAoMA0lCTTEQMA4GA1UEAwwHaWJtLmNvbTAeFw0yMDA5MzAy 5 | MjUxMTdaFw00ODAyMTYyMjUxMTdaMFExCzAJBgNVBAYTAlVTMREwDwYDVQQIDAhO 6 | ZXcgWW9yazEPMA0GA1UEBwwGQXJtb25rMQwwCgYDVQQKDANJQk0xEDAOBgNVBAMM 7 | B2libS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDH0Tb/QzuA 8 | XRMBYy0QgXO0m+XDm4rfWgLvZohFYQho7o3wANyYCuaSGzMyttI4JWr8FdhKk66U 9 | +Wy5N5VjUh3lJhFePaY4i8uTXFuOf3iczSXyIBAMn0ZL3GBcxAPbuUSoH/088pJg 10 | NhXbm3qu0ZEOioj0f0E3n3douJteWJ07NKrerobmeXVZF3ljZfVFDQT3tt0AjNrH 11 | 2l6p2oxFt9rbAVr0SRL6kiSs4u61nV4GBso6tg8Am6lU7sqPfCABkFh1VnpPU9mU 12 | RT4M+QNnu2rvoDWU7kl6sZ6GjVoer6KWtT8yCrO2tSvOGbPv8mDVSvSErz6OFMzS 13 | +9juiWrc2rA7AgMBAAGjIzAhMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD 14 | AgKkMA0GCSqGSIb3DQEBCwUAA4IBAQAaKuqjX22xNczKQiOYscF2aKcVFsiN0Q0V 15 | jYPibXAvxDgzMTbl7iFb8/K9d7UhsdzMvMUo9YNBZYLXrFzSZ6Xki3XCP3RZsryi 16 | x9oZOQX35z1zcSjMI+JsVSIe49UXKuaEyDRe05ZRfFtH+qxOvozxl0mC7TsSrSyT 17 | LorbDP5FdgkUNU6AONm/LokzzMfdiu4cqWAGcWYMuB+bgI1FYvZBLmSpAFXVMUmh 18 | ctNK45MQeDGr3NzHfQuEiwHQ41b1Z05sg6h4tvPI4IhBMl1JIsGsxRKcGlGoW5qH 19 | 0eSkKHiXkdmA1m12+mCgmUq5XLULfZiLHPMS6qC/6IcNZjv1+XPq 20 | -----END CERTIFICATE----- 21 | -------------------------------------------------------------------------------- /oper8/test_helpers/data/test_ca.key: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDH0Tb/QzuAXRMB 3 | Yy0QgXO0m+XDm4rfWgLvZohFYQho7o3wANyYCuaSGzMyttI4JWr8FdhKk66U+Wy5 4 | N5VjUh3lJhFePaY4i8uTXFuOf3iczSXyIBAMn0ZL3GBcxAPbuUSoH/088pJgNhXb 5 | m3qu0ZEOioj0f0E3n3douJteWJ07NKrerobmeXVZF3ljZfVFDQT3tt0AjNrH2l6p 6 | 2oxFt9rbAVr0SRL6kiSs4u61nV4GBso6tg8Am6lU7sqPfCABkFh1VnpPU9mURT4M 7 | +QNnu2rvoDWU7kl6sZ6GjVoer6KWtT8yCrO2tSvOGbPv8mDVSvSErz6OFMzS+9ju 8 | iWrc2rA7AgMBAAECggEAYDrnq6NrYnRwlLh4mXxehtqVmtCr3sjwpO7SLed/L1nD 9 | zZP8qNSHGlIzI/db4mJS1biHg9L/vwsoS0LzlPOVJihhMOurw4u/rOZLyaKmKY26 10 | 
+pJqaEnEhwBJACP+X7En2Xgec123FNK3UB3exZZzr3cUYftvIySPU0WkvZkbj8kB 11 | XuLeFuTxsHgshGb60vAk29uTmBIDw5jA1/qguWaNYAtlAvtQwHO7Bf00HCx0AzaC 12 | SzGEqWcND4Rx2XNKS42zrZ3vzfITNbYzXxhAAE/AB1J/SHc3uyPK1lFRZKs9fUpc 13 | 7wTJRWZXap5bGCtQRLcNXwdiflTj55YPfBoqrivuAQKBgQDuwFR/XPfh/sffk+A2 14 | tVMthPspgEHByJInSXeOg/anDjtnbPMlr1Yx7a3TQ4ivSuBtxLDnp9TYn4MkXzXH 15 | 5E4EUowxL1R60EhVpo37hpFPQVrxjQBHK+zx433wECbRCwV8VgQDfeyB2mFjaq9Z 16 | Wv00gZkLLXoyLHwjIjA+ZiGRuwKBgQDWQM4r3DbFYuS8/dcnIR3BOVYKx1ENbxzV 17 | wzczxqjc7RCpU3R1fvqDRXWOey1B5m0Pkiq5oTGy0rSOYB7coBLaXNHNF5yZ3Tv0 18 | P3SwsqAe5nWeZWnMPJp7JILFhEj7yJXWJwoC8+RZKJK/ImbDR7JLJdgr5ruU40At 19 | C41erzgzgQKBgDXavnSoROX+cZl2IspUZnnLGFiJLMwIcSdZhBVqoPWX2UCAK+yh 20 | O2Exytbv0eIwkqVQC9OFYRaoDI3kyB3Z2ttQv3zMv1KgruBbHIXTGe31wQzhkGF9 21 | gAokFaFUE11uDgO451jeS74Sw4sIYptWUe1JGnuR+5FbxXirWpSItyPHAoGAJI80 22 | XxmZov7oBzPNddjZyfHts7MlKoSuR++iyMv6XpB5Slt6QN5lAudkUubD5z3ZmFGZ 23 | xskxwNDhjHXsbjtGXEqAZl3EFCFDLL9zOv+QARXUNi4Z6obg2Wh2hBu5lHU61Csf 24 | MzfI3EnnGIRXQrVGQDp586vQkGxitA0Hl4OQtIECgYBgN6oVOKlhiwcyMS67P0Vo 25 | wLHuGaTOTBa8MJ129hz9o8RVCAJxFN/noi6i/Ymqa3oah+UOreLkScKRb1/tBaad 26 | UrK7XxD+ZMYpfCn4w3p88zYgcAQjv6x2KT2TvJ+Hm58y4gs4T/2JCvVytwGNVelx 27 | sg4QzDtQGcDuau8vBBAb4A== 28 | -----END PRIVATE KEY----- 29 | -------------------------------------------------------------------------------- /oper8/test_helpers/oper8x_helpers.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module holds helpers that rely on oper8.x 3 | """ 4 | # Standard 5 | import os 6 | 7 | # Local 8 | from .helpers import TEST_DATA_DIR 9 | from oper8.session import Session 10 | from oper8.x.utils import common 11 | from oper8.x.utils.tls_context.internal import InternalCaComponent 12 | 13 | 14 | def set_object_test_state( 15 | session: Session, 16 | kind: str, 17 | name: str, 18 | value, 19 | scoped_name: bool = True, 20 | parent_component_name: str = "test", 21 | ): 22 | obj_name = ( 23 | common.get_resource_cluster_name( 24 | resource_name=name, 25 | component=parent_component_name, 26 | session=session, 27 | ) 28 | if scoped_name 29 | else name 30 | ) 31 | value.setdefault("metadata", {}).setdefault("name", obj_name) 32 | value.setdefault("metadata", {}).setdefault( 33 | "namespace", session.namespace or "default" 34 | ) 35 | value.setdefault("kind", kind) 36 | success, changed = session.deploy_manager.deploy([value]) 37 | assert success, "Failed to set test state!" 38 | return success, changed 39 | 40 | 41 | def set_secret_data( 42 | session: Session, 43 | name, 44 | data=None, 45 | string_data=None, 46 | secret_type="generic", 47 | scoped_name: bool = True, 48 | ): 49 | set_object_test_state( 50 | session, 51 | "Secret", 52 | name, 53 | {"type": secret_type, "data": data, "stringData": string_data}, 54 | scoped_name=scoped_name, 55 | ) 56 | 57 | 58 | def set_tls_ca_secret(session): 59 | """Set the key/cert content for the shared CA secret. 
This function returns
60 | the pem-encoded values for convenience in other tests
61 | """
62 | with open(os.path.join(TEST_DATA_DIR, "test_ca.key")) as f:
63 | key_pem = f.read()
64 | with open(os.path.join(TEST_DATA_DIR, "test_ca.crt")) as f:
65 | crt_pem = f.read()
66 | set_secret_data(
67 | session,
68 | InternalCaComponent.CA_SECRET_NAME,
69 | data={
70 | InternalCaComponent.CA_KEY_FILENAME: common.b64_secret(key_pem),
71 | InternalCaComponent.CA_CRT_FILENAME: common.b64_secret(crt_pem),
72 | },
73 | )
74 |
75 | return key_pem, crt_pem
76 |
--------------------------------------------------------------------------------
/oper8/version.py:
--------------------------------------------------------------------------------
1 | try:
2 | # Local
3 | from ._version import __version__, __version_tuple__ # noqa: F401 # unused import
4 | except ImportError:
5 | __version__ = "unknown"
6 | __version_tuple__ = (0, 0, __version__)
7 |
--------------------------------------------------------------------------------
/oper8/watch_manager/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Top-level watch_manager imports
3 | """
4 |
5 | # Local
6 | from .ansible_watch_manager import AnsibleWatchManager
7 | from .base import WatchManagerBase
8 | from .dry_run_watch_manager import DryRunWatchManager
9 | from .python_watch_manager import PythonWatchManager
10 |
11 | # Expose the static start/stop functions at the top
12 | start_all = WatchManagerBase.start_all
13 | stop_all = WatchManagerBase.stop_all
14 |
--------------------------------------------------------------------------------
/oper8/watch_manager/ansible_watch_manager/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | This module holds the ansible implementation of the WatchManager
3 | """
4 |
5 | # Local
6 | from .ansible_watch_manager import AnsibleWatchManager
7 |
--------------------------------------------------------------------------------
/oper8/watch_manager/ansible_watch_manager/modules/log_rotator.py:
--------------------------------------------------------------------------------
1 | """
2 | Helper module to implement a once-per-open log rotating handler
3 | """
4 |
5 | # Standard
6 | from logging.handlers import RotatingFileHandler
7 | import os
8 |
9 |
10 | class AutoRotatingFileHandler(RotatingFileHandler):
11 | """This subclass of the RotatingFileHandler will perform a rotation once on
12 | construction. The result is that the previous log file will be backed up
13 | before opening the new file, but the file will never rotate during a single
14 | reconcile loop.
15 | """
16 |
17 | def __init__(self, filename, backupCount=10):
18 | """Construct with only the filename and backupCount args of the base
19 | class.
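A minimal usage sketch (the file name is illustrative, and the standard
logging module is assumed to be imported):

    handler = AutoRotatingFileHandler("oper8.log", backupCount=5)
    logging.getLogger().addHandler(handler)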
20 |
21 | Args:
22 | filename: str
23 | The name of the primary log file to manage
24 | backupCount: int
25 | The number of backed up copies of the log file to keep
26 | """
27 | file_already_there = os.path.exists(filename)
28 | super().__init__(filename, backupCount=backupCount, maxBytes=0)
29 | if file_already_there:
30 | self.doRollover()
31 |
--------------------------------------------------------------------------------
/oper8/watch_manager/ansible_watch_manager/resources/playbook-base.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | gather_facts: false
4 | collections:
5 | - kubernetes.core
6 | - operator_sdk.util
7 |
8 | tasks:
9 | - include_role:
10 | name: oper8_app
11 | vars: {}
12 |
--------------------------------------------------------------------------------
/oper8/watch_manager/ansible_watch_manager/resources/roles/oper8_app/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | manage_ansible_status: false
3 | operation: add
4 | log_level: ""
5 | log_filters: ""
6 | log_json: false
7 | dry_run: false
8 | standalone: false
9 | log_thread_id: false
10 | log_file: "{{ full_cr.kind | lower }}.log"
11 | working_dir:
12 | version: ""
13 | enable_ansible_vcs: false
14 |
--------------------------------------------------------------------------------
/oper8/watch_manager/ansible_watch_manager/resources/roles/oper8_app/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Running oper8 application reconciliation
3 | # Required variables:
4 | # - full_cr: full Custom Resource manifest.
5 | # - controller_class: full class path of oper8.Controller inherited class
6 | - debug:
7 | msg: "Starting reconciliation of {{ full_cr.kind }}/{{ ansible_operator_meta.name }}"
8 |
9 | - name: Detect VCS configuration
10 | # There are three ways to modify the ansible-vcs enablement:
11 | #
12 | # 1) Define the 'develop_shell' ansible variable in the playbook invocation
13 | # 2) Define the per-playbook 'enable_ansible_vcs' variable
14 | # 3) Set the ANSIBLE_VCS environment variable
15 | when: develop_shell is not defined and enable_ansible_vcs | default('true', True) | string != "false" and lookup('env', 'ANSIBLE_VCS') | default('true', True) | string != "false"
16 | ansible.builtin.set_fact:
17 | enable_ansible_vcs: true
18 |
19 | - name: "Oper8 Controller"
20 | environment:
21 | PYTHONPATH: "{{ lookup('env', 'PYTHONPATH') }}:/opt/ansible/app/version/{{ version }}/{{ full_cr.kind | lower }}/{{ ansible_operator_meta.name }}/src"
22 | register: reconcile_result
23 | k8s_application:
24 | # (required) version: This is the version passed through from the CR
25 | version: "{{ version }}"
26 |
27 | # (required) controller_class: This is the fully-qualified name of the
28 | # python Application class that is bound to this operator.
29 | controller_class: "{{ controller_class }}"
30 |
31 | # (required) operation: Either "add" for a standard reconcile or "remove"
32 | # for a finalizer
33 | operation: "{{ operation }}"
34 |
35 | # (required): This template passes the content of the full CR
36 | # which triggered the deploy into the library so that the deploy
37 | # parameters can be parsed there.
38 | full_cr: "{{ full_cr }}"
39 |
40 | # (optional) manage_ansible_status: If true, oper8 will emulate the
41 | # status management done natively by ansible based on the readiness
42 | # values of oper8's native status management
43 | manage_ansible_status: "{{ manage_ansible_status }}"
44 |
45 | # Enable strict versioning and status.versions.available.versions
46 | strict_versioning: "{{ strict_versioning }}"
47 |
48 | # (optional) VCS
49 | enable_ansible_vcs: "{{ enable_ansible_vcs }}"
50 | ## Logging ##
51 |
52 | # (optional) log_level: This sets the default verbosity level of the
53 | # logging from inside of the k8s_application library.
54 | log_level: "{{ log_level }}"
55 |
56 | # (optional) log_filters: This sets per-channel verbosity levels for the
57 | # logging from inside of the k8s_application library.
58 | log_filters: "{{ log_filters }}"
59 |
60 | # (optional) log_json: This sets the output formatter for
61 | # k8s_application logging to be 'json' rather than 'pretty'
62 | log_json: "{{ log_json }}"
63 |
64 | # (optional) log_thread_id: This sets whether or not to log thread ids,
65 | # which is useful for dryrun/multithreaded debugging
66 | log_thread_id: "{{ log_thread_id }}"
67 |
68 | # (optional) log_file: This sets the log_file location for oper8 logs
69 | log_file: "{{ log_file }}"
70 |
71 | ## Dev / Debug ##
72 |
73 | # (optional) dry_run: This sets the operator to render-only mode and
74 | # will not actually manage the downstream resources.
75 | dry_run: "{{ dry_run }}"
76 |
77 | # (optional) standalone: This sets the k8s_application library to
78 | # operate outside of 'operator' mode, enabling the playbook to be run
79 | # directly from the command line rather than inside of a deployed
80 | # operator.
81 | standalone: "{{ standalone }}"
82 |
83 | # (optional) working_dir: This sets a named working directory to be used
84 | # by the k8s_application library rather than letting it create a
85 | # random ephemeral working dir internally.
86 | working_dir: "{{ working_dir }}"
87 |
88 | - name: Display reconcile result
89 | debug:
90 | msg: "Reconcile result of {{ full_cr.kind }}/{{ ansible_operator_meta.name }}: {{ reconcile_result }}"
91 |
92 | - name: Requeue reconcile request
93 | operator_sdk.util.requeue_after:
94 | time: "{{ reconcile_result.requeue_after }}"
95 | when: reconcile_result.should_requeue | bool
96 |
--------------------------------------------------------------------------------
/oper8/watch_manager/base.py:
--------------------------------------------------------------------------------
1 | """
2 | This module holds the base class interface for the various implementations of
3 | WatchManager
4 | """
5 |
6 | # Standard
7 | from typing import Type
8 | import abc
9 |
10 | # First Party
11 | import alog
12 |
13 | # Local
14 | from ..controller import Controller
15 |
16 | log = alog.use_channel("WATCH")
17 |
18 |
19 | class WatchManagerBase(abc.ABC):
20 | """A WatchManager is responsible for linking a kubernetes custom resource
21 | type with a Controller that will execute the reconciliation loop
22 | """
23 |
24 | # Class-global mapping of all watches managed by this operator
25 | _ALL_WATCHES = {}
26 |
27 | ## Interface ###############################################################
28 |
29 | def __init__(
30 | self,
31 | controller_type: Type[Controller],
32 | ):
33 | """Construct with the controller type that will be watched
34 |
35 | Args:
36 | controller_type: Type[Controller]
37 | The Controller subclass that will manage this group/version/kind
38 | """
39 | self.controller_type = controller_type
40 | self.group = controller_type.group
41 | self.version = controller_type.version
42 | self.kind = controller_type.kind
43 |
44 | # Register this watch instance
45 | watch_key = str(self)
46 | assert (
47 | watch_key not in self._ALL_WATCHES
48 | ), "Only a single controller may watch a given group/version/kind"
49 | self._ALL_WATCHES[watch_key] = self
50 |
51 | @abc.abstractmethod
52 | def watch(self) -> bool:
53 | """The watch function is responsible for initializing the persistent
54 | watch and returning whether or not the watch was started successfully.
55 |
56 | Returns:
57 | success: bool
58 | True if the watch was spawned correctly, False otherwise.
59 | """
60 |
61 | @abc.abstractmethod
62 | def wait(self):
63 | """The wait function is responsible for blocking until the managed watch
64 | has been terminated.
65 | """
66 |
67 | @abc.abstractmethod
68 | def stop(self):
69 | """Terminate this watch if it is currently running"""
70 |
71 | ## Utilities ###############################################################
72 |
73 | @classmethod
74 | def start_all(cls) -> bool:
75 | """This utility starts all registered watches
76 |
77 | Returns:
78 | success: bool
79 | True if all watches started successfully, False otherwise
80 | """
81 | started_watches = []
82 | success = True
83 | # NOTE: sorting gives deterministic order so that launch failures can be
84 | # diagnosed (and tested) more easily. This is not strictly necessary,
85 | # but it also doesn't hurt and it is nice to have.
86 | for _, watch in sorted(cls._ALL_WATCHES.items()):
87 | if watch.watch():
88 | log.debug("Successfully started %s", watch)
89 | started_watches.append(watch)
90 | else:
91 | log.warning("Failed to start %s", watch)
92 | success = False
93 |
94 | # Shut down all successfully started watches
95 | for started_watch in started_watches:
96 | started_watch.stop()
97 |
98 | # Don't start any of the others
99 | break
100 |
101 | # Wait on all of them to terminate
102 | for watch in cls._ALL_WATCHES.values():
103 | watch.wait()
104 |
105 | return success
106 |
107 | @classmethod
108 | def stop_all(cls):
109 | """This utility stops all watches"""
110 | for watch in cls._ALL_WATCHES.values():
111 | try:
112 | watch.stop()
113 | log.debug2("Waiting for %s to terminate", watch)
114 | watch.wait()
115 | except Exception as exc: # pylint: disable=broad-exception-caught
116 | log.error("Failed to stop watch manager %s", exc, exc_info=True)
117 |
118 | ## Implementation Details ##################################################
119 |
120 | def __str__(self):
121 | """String representation of this watch"""
122 | return f"Watch[{self.controller_type}]"
123 |
--------------------------------------------------------------------------------
/oper8/watch_manager/dry_run_watch_manager.py:
--------------------------------------------------------------------------------
1 | """
2 | Dry run implementation of the WatchManager abstraction
3 | """
4 |
5 | # Standard
6 | from functools import partial
7 | from typing import Optional, Type
8 | import logging
9 |
10 | # First Party
11 | import alog
12 |
13 | # Local
14 | from ..controller import Controller
15 | from ..deploy_manager import DryRunDeployManager
16 | from ..reconcile import ReconcileManager
17 | from .base import WatchManagerBase
18 |
19 | log = alog.use_channel("DRWAT")
20 |
21 |
22 | class DryRunWatchManager(WatchManagerBase):
23 | """
24 | The DryRunWatchManager implements the WatchManagerBase interface using
25 | a single shared DryRunDeployManager to manage an in-memory representation of
26 | the cluster.
27 | """
28 |
29 | reconcile_manager = None
30 |
31 | def __init__(
32 | self,
33 | controller_type: Type[Controller],
34 | deploy_manager: Optional[DryRunDeployManager] = None,
35 | ):
36 | """Construct with the type of controller to watch and optionally a
37 | deploy_manager instance. A deploy_manager will be constructed if none is
38 | given.
39 |
40 | Args:
41 | controller_type: Type[Controller]
42 | The class for the controller that will be watched
43 | deploy_manager: Optional[DryRunDeployManager]
44 | If given, this deploy_manager will be used. This allows for
45 | there to be pre-populated resources. Note that it _must_ be a
46 | DryRunDeployManager (or child class) that supports registering
47 | watches.
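
A rough usage sketch (the controller class and CR manifest are illustrative):

    dm = DryRunDeployManager()
    watch = DryRunWatchManager(MyController, deploy_manager=dm)
    watch.watch()
    # Deploying a matching resource triggers the registered reconcile callback
    dm.deploy([my_cr_manifest])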
48 | """ 49 | super().__init__(controller_type) 50 | 51 | # Set up the deploy manager 52 | self._deploy_manager = deploy_manager or DryRunDeployManager() 53 | 54 | # We lazily initialize the controller instance in watch and _resource in run_reconcile 55 | self._controller = None 56 | self._resource = {} 57 | 58 | # We initialize the reconcile_manager instance on first watch creation 59 | if not self.reconcile_manager: 60 | self.reconcile_manager = ReconcileManager( 61 | deploy_manager=self._deploy_manager, reimport_controller=False 62 | ) 63 | 64 | def watch(self) -> bool: 65 | """Register the watch with the deploy manager""" 66 | if self._controller is not None: 67 | log.warning("Cannot watch multiple times!") 68 | return False 69 | 70 | log.debug("Registering %s with the DeployManager", self.controller_type) 71 | 72 | # Construct controller 73 | self._controller = self.controller_type() 74 | 75 | # Register watch and finalizers 76 | api_version = f"{self.group}/{self.version}" 77 | self._deploy_manager.register_watch( 78 | api_version=api_version, 79 | kind=self.kind, 80 | callback=partial(self.run_reconcile, False), 81 | ) 82 | if self.controller_type.has_finalizer: 83 | log.debug("Registering finalizer") 84 | self._deploy_manager.register_finalizer( 85 | api_version=api_version, 86 | kind=self.kind, 87 | callback=partial(self.run_reconcile, True), 88 | ) 89 | 90 | return True 91 | 92 | def wait(self): 93 | """There is nothing to do in wait""" 94 | 95 | def stop(self): 96 | """There is nothing to do in stop""" 97 | 98 | def run_reconcile(self, is_finalizer: bool, resource: dict): 99 | """Wrapper function to simplify parameter/partial mapping""" 100 | if not self.reconcile_manager: 101 | return 102 | 103 | # Only run reconcile if it's a unique resource 104 | resource_metadata = self._resource.get("metadata", {}) 105 | if ( 106 | self._resource.get("kind") == resource.get("kind") 107 | and self._resource.get("apiVersion") == resource.get("apiVersion") 108 | and resource_metadata.get("name") 109 | == resource.get("metadata", {}).get("name") 110 | and resource_metadata.get("namespace") 111 | == resource.get("metadata", {}).get("namespace") 112 | ): 113 | return 114 | 115 | # Save the current resource and log handlers then restore it after the reconcile 116 | # is completed 117 | log_formatters = {} 118 | for handler in logging.getLogger().handlers: 119 | log_formatters[handler] = handler.formatter 120 | current_resource = self._resource 121 | self._resource = resource 122 | 123 | self.reconcile_manager.reconcile(self._controller, resource, is_finalizer) 124 | self._resource = current_resource 125 | for handler, formatter in log_formatters.items(): 126 | handler.setFormatter(formatter) 127 | -------------------------------------------------------------------------------- /oper8/watch_manager/python_watch_manager/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module holds the pure-python implementation of the WatchManager 3 | """ 4 | 5 | # Local 6 | from .python_watch_manager import PythonWatchManager 7 | -------------------------------------------------------------------------------- /oper8/watch_manager/python_watch_manager/filters/__init__.py: -------------------------------------------------------------------------------- 1 | """ __init__ file for Filter submodule. 
Imports all filters, functions, 2 | and classes from filters module """ 3 | # Local 4 | from .common import get_configured_filter, get_filters_for_resource_id 5 | from .filters import ( 6 | AnnotationFilter, 7 | CreationDeletionFilter, 8 | DependentWatchFilter, 9 | DisableFilter, 10 | EnableFilter, 11 | Filter, 12 | GenerationFilter, 13 | LabelFilter, 14 | NoGenerationFilter, 15 | PauseFilter, 16 | ResourceVersionFilter, 17 | SubsystemStatusFilter, 18 | UserAnnotationFilter, 19 | ) 20 | from .manager import AndFilter, FilterManager, OrFilter 21 | -------------------------------------------------------------------------------- /oper8/watch_manager/python_watch_manager/filters/common.py: -------------------------------------------------------------------------------- 1 | """Common functions used for interacting with filters including default filter classes""" 2 | 3 | # Standard 4 | from functools import lru_cache 5 | from typing import List, Type 6 | import importlib 7 | import inspect 8 | 9 | # First Party 10 | import alog 11 | 12 | # Local 13 | from .... import config 14 | from ....exceptions import ConfigError 15 | from .filters import ( 16 | AnnotationFilter, 17 | CreationDeletionFilter, 18 | EnableFilter, 19 | Filter, 20 | GenerationFilter, 21 | NoGenerationFilter, 22 | PauseFilter, 23 | ResourceVersionFilter, 24 | UserAnnotationFilter, 25 | ) 26 | from .manager import AndFilter, OrFilter 27 | 28 | log = alog.use_channel("PWMFLTCOM") 29 | 30 | 31 | ### Factory Filter Classes 32 | 33 | # Usable Default Filter Classes. 34 | DEFAULT_FILTER_CLASS = AndFilter( 35 | CreationDeletionFilter, 36 | GenerationFilter, 37 | NoGenerationFilter, 38 | ResourceVersionFilter, 39 | PauseFilter, 40 | ) 41 | ANNOTATION_FILTER_CLASS = OrFilter(DEFAULT_FILTER_CLASS, AnnotationFilter) 42 | USER_ANNOTATION_FILTER_CLASS = OrFilter(DEFAULT_FILTER_CLASS, UserAnnotationFilter) 43 | 44 | FILTER_CLASSES = { 45 | "default": DEFAULT_FILTER_CLASS, 46 | "annotation": ANNOTATION_FILTER_CLASS, 47 | "user-annotation": USER_ANNOTATION_FILTER_CLASS, 48 | } 49 | 50 | 51 | # Forward Declarations 52 | CONTROLLER_TYPE = "Controller" 53 | CONTROLLER_CLASS_TYPE = Type[CONTROLLER_TYPE] 54 | RESOURCE_ID_TYPE = "ResourceId" 55 | 56 | ### Factory Filter Functions 57 | 58 | 59 | # Only compute the filters once to avoid reimporting/regathering 60 | @lru_cache(maxsize=1) 61 | def get_configured_filter() -> Filter: 62 | """Get the default filter that should be applied to every resource 63 | 64 | Returns: 65 | default_filter: Filter 66 | The default filter specified in the Config""" 67 | 68 | filter_name = config.python_watch_manager.filter 69 | 70 | # Check for filter in default list or attempt to 71 | # manually import one 72 | if filter_name in FILTER_CLASSES: 73 | filter_obj = FILTER_CLASSES[filter_name] 74 | elif inspect.isclass(filter_name) and issubclass(filter_name, Filter): 75 | filter_obj = filter_name 76 | elif isinstance(filter_name, str): 77 | filter_obj = import_filter(filter_name) 78 | # If no filter is provided then always enable 79 | else: 80 | filter_obj = EnableFilter 81 | 82 | log.debug2(f"Found filter: {filter_obj}") 83 | return filter_obj 84 | 85 | 86 | def get_filters_for_resource_id( 87 | controller_type: CONTROLLER_CLASS_TYPE, resource_id: RESOURCE_ID_TYPE 88 | ) -> List[Filter]: 89 | """Get the filters for a particular resource_id given a controller_type 90 | 91 | Args: 92 | controller_type: CONTROLLER_CLASS_TYPE 93 | The controller type whose filters we're inspecting 94 | resource_id: "ResourceId" 95 | The 
requested resource
96 |
97 | Returns:
98 | filter_list: List[Filter]
99 | The list of filters to be applied
100 | """
101 | filters = getattr(controller_type, "pwm_filters", [])
102 |
103 | if isinstance(filters, list):
104 | return_filters = filters
105 |
106 | elif isinstance(filters, dict):
107 | return_filters = filters.get(resource_id.global_id, [])
108 |
109 | else:
110 | raise ConfigError(f"Invalid type for PWM filters: {type(filters)}")
111 |
112 | log.debug3(f"Found filters {return_filters} for resource: {resource_id}")
113 | return return_filters
114 |
115 |
116 | ### Helper Functions
117 |
118 |
119 | def import_filter(filter_name: str) -> Filter:
120 | """Import a filter from a string reference
121 |
122 | Args:
123 | filter_name: str
124 | Filter name in <module>.<class> form
125 |
126 | Returns:
127 | imported_filter: Filter
128 | The filter that was requested
129 | """
130 | module_path, class_name = filter_name.rsplit(".", 1)
131 | try:
132 | filter_module = importlib.import_module(module_path)
133 | filter_obj = getattr(filter_module, class_name)
134 | except (ImportError, AttributeError) as exc:
135 | raise ConfigError(
136 | f"Invalid Filter: {filter_name}. Module or class not found"
137 | ) from exc
138 |
139 | if (
140 | inspect.isclass(filter_obj) and not issubclass(filter_obj, Filter)
141 | ) and not isinstance(filter_obj, (Filter, list, tuple)):
142 | raise ConfigError(f"{filter_obj} is not an instance of {Filter}")
143 |
144 | return filter_obj
145 |
--------------------------------------------------------------------------------
/oper8/watch_manager/python_watch_manager/leader_election/__init__.py:
--------------------------------------------------------------------------------
1 | """__init__ file for leadership election classes. Imports all leadership managers
2 | and defines a generic helper"""
3 | # Standard
4 | from typing import Type
5 |
6 | # Local
7 | from .... import config
8 | from .annotation import AnnotationLeadershipManager
9 | from .base import LeadershipManagerBase
10 | from .dry_run import DryRunLeadershipManager
11 | from .lease import LeaderWithLeaseManager
12 | from .life import LeaderForLifeManager
13 |
14 |
15 | def get_leader_election_class() -> Type[LeadershipManagerBase]:
16 | """Get the current configured leadership election"""
17 | if config.python_watch_manager.lock.type == "leader-for-life":
18 | return LeaderForLifeManager
19 | if config.python_watch_manager.lock.type == "leader-with-lease":
20 | return LeaderWithLeaseManager
21 | if config.python_watch_manager.lock.type == "annotation":
22 | return AnnotationLeadershipManager
23 | if config.python_watch_manager.lock.type == "dryrun":
24 | return DryRunLeadershipManager
25 | return DryRunLeadershipManager
26 |
--------------------------------------------------------------------------------
/oper8/watch_manager/python_watch_manager/leader_election/dry_run.py:
--------------------------------------------------------------------------------
1 | """Implementation of the DryRun LeaderElection"""
2 | # Standard
3 | from typing import Optional
4 |
5 | # Local
6 | from ....managed_object import ManagedObject
7 | from .base import LeadershipManagerBase
8 |
9 |
10 | class DryRunLeadershipManager(LeadershipManagerBase):
11 | """DryRunLeaderElection class implements an empty leadership
12 | election manager which always acts as a leader. This is useful
13 | for dryrun or running without leadership election"""
14 |
15 | def acquire(self, force: bool = False):
16 | """
17 | Return true as dryrun is always leader
18 | """
19 | return True
20 |
21 | def acquire_resource(self, resource: ManagedObject):
22 | """
23 | Return true as dryrun is always leader
24 | """
25 | return True
26 |
27 | def release(self):
28 | """
29 | NoOp in DryRun as lock is not real
30 | """
31 |
32 | def release_resource(self, resource: ManagedObject):
33 | """
34 | NoOp in DryRun as lock is not real
35 | """
36 |
37 | def is_leader(self, resource: Optional[ManagedObject] = None):
38 | """
39 | DryRunLeadershipManager is always leader
40 | """
41 | return True
42 |
--------------------------------------------------------------------------------
/oper8/watch_manager/python_watch_manager/leader_election/life.py:
--------------------------------------------------------------------------------
1 | """Implementation of the Leader-for-Life LeaderElection"""
2 | # First Party
3 | import alog
4 |
5 | # Local
6 | from .... import config
7 | from ....deploy_manager.owner_references import update_owner_references
8 | from ....exceptions import ConfigError, assert_config
9 | from ....managed_object import ManagedObject
10 | from ....utils import nested_get
11 | from ..utils import get_operator_namespace, get_pod_name
12 | from .base import ThreadedLeaderManagerBase
13 |
14 | log = alog.use_channel("LDRLIFE")
15 |
16 |
17 | class LeaderForLifeManager(ThreadedLeaderManagerBase):
18 | """
19 | LeaderForLifeManager Class implements the old "leader-for-life" operator-sdk
20 | lock type. This lock creates a configmap with the operator pod as owner in
21 | the current namespace. This way, when the pod is deleted or lost, so is the
22 | configmap.
23 | """
24 |
25 | def __init__(self, deploy_manager):
26 | """
27 | Initialize class with lock_name, current namespace, and pod information
28 | """
29 | super().__init__(deploy_manager)
30 |
31 | # Gather lock_name, namespace and pod manifest
32 | self.lock_name = (
33 | config.operator_name
34 | if config.operator_name
35 | else config.python_watch_manager.lock.name
36 | )
37 |
38 | self.namespace = get_operator_namespace()
39 | pod_name = get_pod_name()
40 | assert_config(self.lock_name, "Unable to detect lock name")
41 | assert_config(self.namespace, "Unable to detect operator namespace")
42 | assert_config(pod_name, "Unable to detect pod name")
43 |
44 | # Get the current pod context which is used in the lock configmap
45 | log.debug("Gathering pod context information")
46 | success, pod_obj = self.deploy_manager.get_object_current_state(
47 | kind="Pod", name=pod_name, namespace=self.namespace, api_version="v1"
48 | )
49 | if not success or not pod_obj:
50 | log.error(
51 | "Unable to fetch pod %s/%s. Unable to use leader-for-life without ownerReference",
52 | self.namespace,
53 | pod_name,
54 | )
55 | raise ConfigError(
56 | f"Unable to fetch pod {self.namespace}/{pod_name}. "
57 | "Unable to use leader-for-life without ownerReference" 58 | ) 59 | 60 | self.pod_manifest = ManagedObject(pod_obj) 61 | 62 | def renew_or_acquire(self): 63 | """ 64 | Renew or acquire lock by checking the current configmap status 65 | """ 66 | # Get current config map 67 | success, cluster_config_map = self.deploy_manager.get_object_current_state( 68 | kind="ConfigMap", 69 | name=self.lock_name, 70 | namespace=self.namespace, 71 | api_version="v1", 72 | ) 73 | if not success: 74 | log.warning( 75 | "Unable to fetch config map %s/%s", self.namespace, self.lock_name 76 | ) 77 | 78 | # If configmap exists then verify owner ref 79 | if cluster_config_map: 80 | log.debug2( 81 | f"ConfigMap Lock {cluster_config_map} already exists, checking ownership" 82 | ) 83 | owner_ref_list = nested_get( 84 | cluster_config_map, "metadata.ownerReferences", [] 85 | ) 86 | if len(owner_ref_list) != 1: 87 | log.error( 88 | "Invalid leadership config map detected. Only one owner allowed" 89 | ) 90 | self.release_lock() 91 | return 92 | 93 | if owner_ref_list[0].get("uid") == self.pod_manifest.uid: 94 | self.acquire_lock() 95 | else: 96 | self.release_lock() 97 | 98 | # Create configmap if it doesn't exist 99 | else: 100 | log.debug2(f"ConfigMap Lock {cluster_config_map} does not exist, creating") 101 | config_map = { 102 | "kind": "ConfigMap", 103 | "apiVersion": "v1", 104 | "metadata": { 105 | "name": self.lock_name, 106 | "namespace": self.namespace, 107 | }, 108 | } 109 | update_owner_references( 110 | self.deploy_manager, self.pod_manifest.definition, config_map 111 | ) 112 | success, _ = self.deploy_manager.deploy( 113 | [config_map], manage_owner_references=False 114 | ) 115 | if not success: 116 | log.warning("Unable to acquire leadership lock") 117 | self.release_lock() 118 | else: 119 | self.acquire_lock() 120 | -------------------------------------------------------------------------------- /oper8/watch_manager/python_watch_manager/threads/__init__.py: -------------------------------------------------------------------------------- 1 | """Import the ThreadBase and subclasses""" 2 | # Local 3 | from .base import ThreadBase 4 | from .heartbeat import HeartbeatThread 5 | from .reconcile import ReconcileThread 6 | from .timer import TimerThread 7 | from .watch import WatchThread 8 | -------------------------------------------------------------------------------- /oper8/watch_manager/python_watch_manager/threads/base.py: -------------------------------------------------------------------------------- 1 | """ 2 | Module for the ThreadBase Class 3 | """ 4 | 5 | # Standard 6 | import threading 7 | 8 | # First Party 9 | import alog 10 | 11 | # Local 12 | from ....deploy_manager import DeployManagerBase 13 | from ..leader_election import DryRunLeadershipManager, LeadershipManagerBase 14 | 15 | log = alog.use_channel("TRDUTLS") 16 | 17 | 18 | class ThreadBase(threading.Thread): 19 | """Base class for all other thread classes. This class handles generic starting, stopping, 20 | and leadership functions""" 21 | 22 | def __init__( 23 | self, 24 | name: str = None, 25 | daemon: bool = None, 26 | deploy_manager: DeployManagerBase = None, 27 | leadership_manager: LeadershipManagerBase = None, 28 | ): 29 | """Initialize class and store required instance variables. 
This function
30 | is normally overridden by subclasses that pass in static name/daemon variables
31 |
32 | Args:
33 | name: str = None
34 | The name of the thread to manage
35 | daemon: bool = None
36 | Whether this thread is a daemon (if True, Python may exit without waiting for it to stop)
37 | deploy_manager: DeployManagerBase = None
38 | The deploy manager available to this thread during start()
39 | leadership_manager: LeadershipManagerBase = None
40 | The leadership_manager for tracking elections
41 | """
42 | self.deploy_manager = deploy_manager
43 | self.leadership_manager = leadership_manager or DryRunLeadershipManager()
44 | self.shutdown = threading.Event()
45 | super().__init__(name=name, daemon=daemon)
46 |
47 | ## Abstract Interface ######################################################
48 | #
49 | # These functions must be implemented by child classes
50 | ##
51 | def run(self):
52 | """Control loop for the thread. Once this function exits the thread stops"""
53 | raise NotImplementedError()
54 |
55 | ## Base Class Interface ####################################################
56 | #
57 | # These methods MAY be implemented by children, but contain default
58 | # implementations that are appropriate for simple cases.
59 | #
60 | ##
61 |
62 | def start_thread(self):
63 | """If the thread is not already alive start it"""
64 | if not self.is_alive():
65 | log.info("Starting %s: %s", self.__class__.__name__, self.name)
66 | self.start()
67 |
68 | def stop_thread(self):
69 | """Set the shutdown event"""
70 | log.info("Stopping %s: %s", self.__class__.__name__, self.name)
71 | self.shutdown.set()
72 |
73 | def should_stop(self) -> bool:
74 | """Helper to determine if a thread should shutdown"""
75 | return self.shutdown.is_set()
76 |
77 | def check_preconditions(self) -> bool:
78 | """Helper function to check if the thread should shutdown or reacquire leadership"""
79 | if self.should_stop():
80 | return False
81 |
82 | if self.leadership_manager and not self.leadership_manager.is_leader():
83 | log.debug3("Waiting for leadership")
84 | self.leadership_manager.acquire()
85 |
86 | return True
87 |
88 | def wait_on_precondition(self, timeout: float) -> bool:
89 | """Helper function to allow threads to wait for a certain period of time
90 | only being interrupted for preconditions"""
91 | self.shutdown.wait(timeout)
92 |
93 | return self.check_preconditions()
94 |
--------------------------------------------------------------------------------
/oper8/watch_manager/python_watch_manager/threads/heartbeat.py:
--------------------------------------------------------------------------------
1 | """
2 | Thread class that will dump a heartbeat to a file periodically
3 | """
4 |
5 | # Standard
6 | from datetime import datetime
7 | import threading
8 |
9 | # First Party
10 | import alog
11 |
12 | # Local
13 | from ..utils import parse_time_delta
14 | from .timer import TimerThread
15 |
16 | log = alog.use_channel("HBEAT")
17 |
18 |
19 | class HeartbeatThread(TimerThread):
20 | """The HeartbeatThread acts as a pulse for the PythonWatchManager.
21 |
22 | This thread will periodically dump the value of "now" to a file which can be
23 | read by an observer such as a liveness/readiness probe to ensure that the
24 | manager is functioning well.
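
A probe-side freshness check might look like this sketch (the file path and
staleness threshold are illustrative; the format matches _DATE_FORMAT below):

    from datetime import datetime

    with open("heartbeat.txt", encoding="utf-8") as fh:
        beat = datetime.strptime(fh.read(), "%Y-%m-%d %H:%M:%S")
    assert (datetime.now() - beat).total_seconds() < 60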
25 | """ 26 | 27 | # This format is designed to be read using `date -d $(cat heartbeat.txt)` 28 | # using the GNU date utility 29 | # CITE: https://www.gnu.org/software/coreutils/manual/html_node/Examples-of-date.html 30 | _DATE_FORMAT = "%Y-%m-%d %H:%M:%S" 31 | 32 | def __init__(self, heartbeat_file: str, heartbeat_period: str): 33 | """Initialize with the file location for the heartbeat output 34 | 35 | Args: 36 | heartbeat_file: str 37 | The fully-qualified path to the heartbeat file 38 | heartbeat_period: str 39 | Time delta string representing period delay between beats. 40 | NOTE: The GNU `date` utility cannot parse sub-seconds easily, so 41 | the expected configuration for this is to be >= 1s 42 | """ 43 | self._heartbeat_file = heartbeat_file 44 | self._offset = parse_time_delta(heartbeat_period) 45 | self._beat_lock = threading.Lock() 46 | self._beat_event = threading.Event() 47 | super().__init__(name="heartbeat_thread") 48 | 49 | def run(self): 50 | self._run_heartbeat() 51 | return super().run() 52 | 53 | def wait_for_beat(self): 54 | """Wait for the next beat""" 55 | # Make sure the beat lock is not held before starting wait. This 56 | # prevents beats that are immediately ready 57 | with self._beat_lock: 58 | pass 59 | 60 | # Wait for the next beat 61 | self._beat_event.wait() 62 | 63 | def _run_heartbeat(self): 64 | """Run the heartbeat dump to the heartbeat file and put the next beat""" 65 | now = datetime.now() 66 | log.debug3("Heartbeat %s", now) 67 | 68 | # Save the beat to disk 69 | try: 70 | with open(self._heartbeat_file, "w", encoding="utf-8") as handle: 71 | handle.write(now.strftime(self._DATE_FORMAT)) 72 | handle.flush() 73 | except Exception as err: 74 | log.warning("Failed to write heartbeat file: %s", err, exc_info=True) 75 | 76 | # Unblock and reset the wait condition 77 | with self._beat_lock: 78 | self._beat_event.set() 79 | self._beat_event.clear() 80 | 81 | # Put the next beat if not stopped 82 | if not self.should_stop(): 83 | self.put_event(now + self._offset, self._run_heartbeat) 84 | -------------------------------------------------------------------------------- /oper8/watch_manager/python_watch_manager/utils/__init__.py: -------------------------------------------------------------------------------- 1 | """ Import All functions, constants, and class from utils module """ 2 | # Local 3 | from .common import ( 4 | get_logging_handlers, 5 | get_operator_namespace, 6 | get_pod_name, 7 | obj_to_hash, 8 | parse_time_delta, 9 | ) 10 | from .constants import ( 11 | JOIN_PROCESS_TIMEOUT, 12 | MIN_SLEEP_TIME, 13 | RESERVED_PLATFORM_ANNOTATIONS, 14 | RESOURCE_VERSION_KEEP_COUNT, 15 | SHUTDOWN_RECONCILE_POLL_TIME, 16 | ) 17 | from .log_handler import LogQueueHandler 18 | from .types import ( 19 | ABCSingletonMeta, 20 | ClassInfo, 21 | ReconcileProcess, 22 | ReconcileRequest, 23 | ReconcileRequestType, 24 | ResourceId, 25 | Singleton, 26 | TimerEvent, 27 | WatchedResource, 28 | WatchRequest, 29 | ) 30 | -------------------------------------------------------------------------------- /oper8/watch_manager/python_watch_manager/utils/common.py: -------------------------------------------------------------------------------- 1 | """ 2 | Shared utilities for the PythonWatchManager 3 | """ 4 | # Standard 5 | from datetime import timedelta 6 | from typing import Any, List, Optional 7 | import json 8 | import logging 9 | import pathlib 10 | import platform 11 | import re 12 | 13 | # First Party 14 | import alog 15 | 16 | # Local 17 | from .... 
import config 18 | 19 | log = alog.use_channel("PWMCMMN") 20 | 21 | 22 | ## Time Functions 23 | 24 | # Shamelessly stolen from 25 | # https://stackoverflow.com/questions/4628122/how-to-construct-a-timedelta-object-from-a-simple-string 26 | regex = re.compile( 27 | r"^((?P<hours>\d+?)hr)?((?P<minutes>\d+?)m)?((?P<seconds>\d*\.?\d+?)s)?$" 28 | ) 29 | 30 | 31 | def parse_time_delta( 32 | time_str: str, 33 | ) -> Optional[timedelta]: # pylint: disable=inconsistent-return-statements 34 | """Parse a string into a timedelta. Accepts values in the 35 | following formats: 1hr, 5m, 10s, etc 36 | 37 | Args: 38 | time_str: str 39 | The string representation of a timedelta 40 | 41 | Returns: 42 | result: Optional[timedelta] 43 | The parsed timedelta if one could be found 44 | """ 45 | parts = regex.match(time_str) 46 | if not parts or all(part is None for part in parts.groupdict().values()): 47 | return None 48 | parts = parts.groupdict() 49 | time_params = {} 50 | for name, param in parts.items(): 51 | if param: 52 | time_params[name] = float(param) 53 | return timedelta(**time_params) 54 | 55 | 56 | ## Identity Util Functions 57 | 58 | 59 | def get_operator_namespace() -> str: 60 | """Get the current namespace from a kubernetes file or config""" 61 | # Default to in cluster namespace file 62 | namespace_file = pathlib.Path( 63 | "/var/run/secrets/kubernetes.io/serviceaccount/namespace" 64 | ) 65 | if namespace_file.is_file(): 66 | return namespace_file.read_text(encoding="utf-8") 67 | return config.python_watch_manager.lock.namespace 68 | 69 | 70 | def get_pod_name() -> str: 71 | """Get the current pod from env variables, config, or hostname""" 72 | 73 | pod_name = config.pod_name 74 | if not pod_name: 75 | log.warning("Pod name not detected, falling back to hostname") 76 | pod_name = platform.node().split(".")[0] 77 | 78 | return pod_name 79 | 80 | 81 | ## Helper functions 82 | 83 | 84 | def obj_to_hash(obj: Any) -> int: 85 | """Get the hash of any jsonable python object 86 | 87 | Args: 88 | obj: Any 89 | The object to hash 90 | 91 | Returns: 92 | hash: int 93 | The hash of obj 94 | """ 95 | return hash(json.dumps(obj, sort_keys=True)) 96 | 97 | 98 | def get_logging_handlers() -> List[logging.Handler]: 99 | """Get the current logging handlers""" 100 | logger = logging.getLogger() 101 | if not logger.handlers: 102 | handler = logging.StreamHandler() 103 | logger.addHandler(handler) 104 | 105 | return logger.handlers 106 | -------------------------------------------------------------------------------- /oper8/watch_manager/python_watch_manager/utils/constants.py: -------------------------------------------------------------------------------- 1 | """Useful Constants""" 2 | 3 | 4 | ## Reconcile Constants 5 | 6 | # Default timeout when joining processes 7 | JOIN_PROCESS_TIMEOUT = 5 8 | 9 | # Default Poll Time for running reconcile cleanup on shutdown 10 | SHUTDOWN_RECONCILE_POLL_TIME = 0.1 11 | 12 | ## Timer Constants 13 | 14 | # Minimum wait time between checks in periodic thread 15 | MIN_SLEEP_TIME = 1 16 | 17 | 18 | ## Filter Constants 19 | 20 | # Only keep a set number of resource versions per watched resource 21 | # this limits the amount of memory used 22 | RESOURCE_VERSION_KEEP_COUNT = 20 23 | 24 | # List of reserved annotations used by the platforms 25 | RESERVED_PLATFORM_ANNOTATIONS = [ 26 | "k8s.io", 27 | "kubernetes.io", 28 | "openshift.io", 29 | ] 30 | -------------------------------------------------------------------------------- /oper8/watch_manager/python_watch_manager/utils/log_handler.py: 
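
Before moving on to the log handler, a hedged aside on the time utilities above: parse_time_delta maps period strings like "1hr30m" onto datetime.timedelta objects and returns None for unparseable input. A minimal usage sketch (import path taken from the file header above):

    from datetime import timedelta
    from oper8.watch_manager.python_watch_manager.utils.common import parse_time_delta

    assert parse_time_delta("30s") == timedelta(seconds=30)
    assert parse_time_delta("1hr30m") == timedelta(hours=1, minutes=30)
    assert parse_time_delta("bogus") is None
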
-------------------------------------------------------------------------------- 1 | """Log handler helper class""" 2 | 3 | # Standard 4 | from logging import Formatter, LogRecord 5 | from logging.handlers import QueueHandler 6 | import copy 7 | 8 | # First Party 9 | import alog 10 | 11 | # Local 12 | from ....managed_object import ManagedObject 13 | 14 | log = alog.use_channel("LOG-HANDLER") 15 | 16 | # Forward declaration of a queue for any type 17 | QUEUE_TYPE = "Queue[Any]" 18 | 19 | 20 | class LogQueueHandler(QueueHandler): 21 | """ 22 | Log Handler class to collect messages from child processes and pass 23 | them to the root process via a multiprocess queue 24 | """ 25 | 26 | def __init__(self, queue: QUEUE_TYPE, manifest: ManagedObject = None): 27 | """Initialize the queue handler and instance variables 28 | 29 | Args: 30 | queue: "Queue[Any]" 31 | The queue to pass messages to 32 | manifest: ManagedObject 33 | The manifest of the current process. This is only used if it can't find 34 | the resource on the current formatter 35 | """ 36 | super().__init__(queue) 37 | self.manifest = manifest 38 | 39 | def prepare(self, record: LogRecord) -> LogRecord: 40 | """Prep a record for pickling before sending it to the queue 41 | 42 | Args: 43 | record: LogRecord 44 | The record to be prepared 45 | 46 | Returns: 47 | prepared_record: LogRecord 48 | The prepared record ready to be pickled 49 | """ 50 | 51 | # Duplicate record to preserve other handlers 52 | record = copy.copy(record) 53 | 54 | # get the currently used formatter 55 | formatter = self.formatter if self.formatter else Formatter() 56 | 57 | # Exceptions can't always be pickled so manually process 58 | # the record but remove the exc_info. This retains the 59 | # processed exc_text but allows the parent process to reformat 60 | # the message 61 | if record.exc_info: 62 | record.exc_text = formatter.formatException(record.exc_info) 63 | record.exc_info = None 64 | 65 | # In case there are exceptions/unpicklable objects in the logging 66 | # args then manually compute the message. After computing clear the 67 | # message/args values to allow the parent process to reformat the 68 | # record 69 | record.msg = record.getMessage() 70 | record.args = [] 71 | 72 | # Take the manifest from the current formatter and pass it back up 73 | resource = {} 74 | if hasattr(formatter, "manifest"): 75 | resource = formatter.manifest 76 | elif self.manifest: 77 | resource = self.manifest 78 | 79 | # Only copy required resource keys to the record 80 | resource_metadata = resource.get("metadata", {}) 81 | record.resource = { 82 | "kind": resource.get("kind"), 83 | "apiVersion": resource.get("apiVersion"), 84 | "metadata": { 85 | "name": resource_metadata.get("name"), 86 | "namespace": resource_metadata.get("namespace"), 87 | "resourceVersion": resource_metadata.get("resourceVersion"), 88 | }, 89 | } 90 | 91 | return record 92 | -------------------------------------------------------------------------------- /oper8/x/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | The oper8.x module holds common implementations of reusable patterns built on 3 | top of the abstractions in oper8. These are intended as reusable components that 4 | can be shared across many operator implementations. 5 | 6 | One of the core principles of oper8 is that the schema for config is entirely up 7 | to the user (with the _only_ exception being spec.version). 
In oper8.x, this is 8 | not the case and there are many config conventions (CRD schema and backend) that 9 | are encoded into the various utilities. 10 | """ 11 | -------------------------------------------------------------------------------- /oper8/x/datastores/__init__.py: -------------------------------------------------------------------------------- 1 | # Local 2 | from .connection_base import DatastoreConnectionBase 3 | from .cos import CosFactory 4 | from .factory_base import DatastoreSingletonFactoryBase 5 | from .interfaces import Datastore 6 | from .postgres import PostgresFactory 7 | from .redis import RedisFactory 8 | -------------------------------------------------------------------------------- /oper8/x/datastores/connection_base.py: -------------------------------------------------------------------------------- 1 | """ 2 | Base class definition for all datastore connections 3 | """ 4 | 5 | # Standard 6 | from typing import Optional 7 | import abc 8 | 9 | # First Party 10 | import alog 11 | 12 | # Local 13 | from ... import Session, assert_cluster 14 | from ..utils import common 15 | from ..utils.abc_static import ABCStatic 16 | 17 | log = alog.use_channel("DCONN") 18 | 19 | 20 | class DatastoreConnectionBase(ABCStatic): 21 | """ 22 | A DatastoreConnection is an object that holds all of the critical data to 23 | connect to a specific datastore type. A DatastoreConnection for a given 24 | datastore type MUST not care what implementation backs the connection. 25 | """ 26 | 27 | ## Construction ############################################################ 28 | 29 | def __init__(self, session: Session): 30 | """Construct with the session so that it can be saved as a member""" 31 | self._session = session 32 | 33 | @property 34 | def session(self) -> Session: 35 | return self._session 36 | 37 | ## Abstract Interface ###################################################### 38 | 39 | @abc.abstractmethod 40 | def to_dict(self) -> dict: 41 | """Serialize the internal connection details to a dict object which can 42 | be added directly to a subsystem's CR. 43 | 44 | Returns: 45 | config_dict: dict 46 | This dict will hold the keys and values that can be used to add 47 | to a subsystem's datastores.connections section. 48 | """ 49 | 50 | @classmethod 51 | @abc.abstractmethod 52 | def from_dict( 53 | cls, session: Session, config_dict: dict 54 | ) -> "DatastoreConnectionBase": 55 | """Parse a config_dict from a subsystem CR to create an instance of the 56 | DatastoreConnection class. 57 | 58 | Args: 59 | session: Session 60 | The current deploy session 61 | config_dict: dict 62 | This dict will hold the keys and values created by to_dict and 63 | pulled from the subsystem CR. 64 | 65 | Returns: 66 | datastore_connection: DatastoreConnectionBase 67 | The constructed instance of the connection 68 | """ 69 | 70 | ## Shared Utilities ######################################################## 71 | 72 | def _fetch_secret_data(self, secret_name: str) -> Optional[dict]: 73 | """Most connection implementations will need the ability to fetch secret 74 | data from the cluster when loading from the CR dict, so this provides a 75 | common implementation. 
76 | 77 | Args: 78 | secret_name: str 79 | The name of the secret to fetch 80 | 81 | Returns: 82 | secret_data: Optional[dict] 83 | The content of the 'data' field in the secret with values base64 84 | decoded if the secret is found, otherwise None 85 | """ 86 | success, content = self.session.get_object_current_state("Secret", secret_name) 87 | assert_cluster(success, f"Fetching connection secret [{secret_name}] failed") 88 | if content is None: 89 | return None 90 | assert "data" in content, "Got a secret without 'data'?" 91 | return { 92 | key: common.b64_secret_decode(val) for key, val in content["data"].items() 93 | } 94 | -------------------------------------------------------------------------------- /oper8/x/datastores/cos/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Top level imports for the cos datastore type 3 | """ 4 | 5 | # Local 6 | from .connection import CosConnection 7 | from .factory import CosFactory 8 | -------------------------------------------------------------------------------- /oper8/x/datastores/cos/factory.py: -------------------------------------------------------------------------------- 1 | """ 2 | COS instance factory 3 | """ 4 | 5 | # Local 6 | from ..factory_base import DatastoreSingletonFactoryBase 7 | from .connection import CosConnection 8 | 9 | 10 | class CosFactory(DatastoreSingletonFactoryBase): 11 | """The common factory that will manage instances of COS for each deploy""" 12 | 13 | DATASTORE_TYPE = "cos" 14 | CONNECTION_TYPE = CosConnection 15 | -------------------------------------------------------------------------------- /oper8/x/datastores/interfaces.py: -------------------------------------------------------------------------------- 1 | """ 2 | Base class for all Datastore component implementations 3 | """ 4 | 5 | # Standard 6 | from typing import Optional 7 | import abc 8 | 9 | # First Party 10 | import aconfig 11 | 12 | # Local 13 | from ... import Session 14 | from ..oper8x_component import Oper8xComponent 15 | from .connection_base import DatastoreConnectionBase 16 | 17 | 18 | class Datastore(Oper8xComponent): 19 | """ 20 | The Datastore base class defines the interface that any datastore must 21 | conform to. It is an oper8 Component and should be constructed via a per-type 22 | factory. 23 | """ 24 | 25 | _TYPE_LABEL_ATTRIBUTE = "TYPE_LABEL" 26 | 27 | def __init__( 28 | self, 29 | session: Session, 30 | config: aconfig.Config, 31 | instance_name: Optional[str] = None, 32 | disabled: bool = False, 33 | ): 34 | """This passthrough constructor enforces that all datastores have a 35 | class attribute TYPE_LABEL (str) 36 | """ 37 | type_label = getattr(self, self._TYPE_LABEL_ATTRIBUTE, None) 38 | assert isinstance( 39 | type_label, str 40 | ), f"All datastore types must define {self._TYPE_LABEL_ATTRIBUTE} as a str" 41 | super().__init__(session=session, disabled=disabled) 42 | self._config = config 43 | self.instance_name = instance_name 44 | 45 | @property 46 | def config(self) -> aconfig.Config: 47 | """The config for this instance of the datastore""" 48 | return self._config 49 | 50 | @abc.abstractmethod 51 | def get_connection(self) -> DatastoreConnectionBase: 52 | """Get the connection object for this datastore instance. Each datastore 53 | type must manage a common abstraction for a connection which clients 54 | will use to connect to the datastore. 
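
        A hedged sketch of the client side (assuming some concrete Datastore
        subclass instance):

            connection = my_datastore.get_connection()
            cr_section = connection.to_dict()  # suitable for a subsystem CR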
55 | """ 56 | -------------------------------------------------------------------------------- /oper8/x/datastores/postgres/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Common postgres module exposed imports 3 | """ 4 | 5 | # Local 6 | from .connection import PostgresConnection 7 | from .factory import PostgresFactory 8 | -------------------------------------------------------------------------------- /oper8/x/datastores/postgres/factory.py: -------------------------------------------------------------------------------- 1 | """ 2 | Postgres instance factory 3 | """ 4 | 5 | # Local 6 | from ..factory_base import DatastoreSingletonFactoryBase 7 | from .connection import PostgresConnection 8 | 9 | 10 | class PostgresFactory(DatastoreSingletonFactoryBase): 11 | """The common factory that will manage instances of Postgres for each deploy""" 12 | 13 | DATASTORE_TYPE = "postgres" 14 | CONNECTION_TYPE = PostgresConnection 15 | -------------------------------------------------------------------------------- /oper8/x/datastores/postgres/interfaces.py: -------------------------------------------------------------------------------- 1 | """ 2 | Base class interface for a Postgres component 3 | """ 4 | 5 | # Local 6 | from .... import component 7 | from ..interfaces import Datastore 8 | 9 | COMPONENT_NAME = "postgres" 10 | 11 | 12 | @component(COMPONENT_NAME) 13 | class IPostgresComponent(Datastore): 14 | """A postgres chart provides access to a single running Postgres cluster""" 15 | 16 | ## Shared Utilities ######################################################## 17 | 18 | def tls_enabled(self) -> bool: 19 | """Return whether TLS is enabled or not 20 | Returns: 21 | bool: True (TLS enabled), False (TLS disabled) 22 | """ 23 | return self.config.get("tls", {}).get("enabled", True) 24 | -------------------------------------------------------------------------------- /oper8/x/datastores/redis/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Top level imports for the Redis datastore type 3 | """ 4 | 5 | # Local 6 | from .connection import RedisConnection 7 | from .factory import RedisFactory 8 | -------------------------------------------------------------------------------- /oper8/x/datastores/redis/factory.py: -------------------------------------------------------------------------------- 1 | """ 2 | Redis instance factory 3 | """ 4 | 5 | # Local 6 | from ..factory_base import DatastoreSingletonFactoryBase 7 | from .connection import RedisConnection 8 | 9 | 10 | class RedisFactory(DatastoreSingletonFactoryBase): 11 | """The common factory that will manage instances of Redis""" 12 | 13 | DATASTORE_TYPE = "redis" 14 | CONNECTION_TYPE = RedisConnection 15 | -------------------------------------------------------------------------------- /oper8/x/utils/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Common utilities for reused components 3 | """ 4 | -------------------------------------------------------------------------------- /oper8/x/utils/abc_static.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module adds metaclass support for declaring an interface with 3 | @abstractmethod methods that MUST be implemented as @classmethod or 4 | @staticmethod 5 | """ 6 | 7 | # Standard 8 | import abc 9 | import inspect 10 | 11 | 12 | class ABCStaticMeta(abc.ABCMeta): 13 | """The 
ABCStaticMeta class is a metaclass that enforces implementations of 14 | base class functions marked as both @abstractmethod and @classmethod. 15 | Methods with this signature MUST be implemented with the @classmethod or 16 | @staticmethod decorator in derived classes. 17 | """ 18 | 19 | def __init__(cls, name, bases, dct): 20 | # Find abstract class methods that have not been implemented at all 21 | attrs = {name: getattr(cls, name) for name in dir(cls)} 22 | cls.__abstract_class_methods__ = [ 23 | name 24 | for name, attr in attrs.items() 25 | if inspect.ismethod(attr) and getattr(attr, "__isabstractmethod__", False) 26 | ] 27 | 28 | # For any abstract class methods that have not been implemented, 29 | # overwrite them to raise NotImplementedError if called 30 | for method_name in cls.__abstract_class_methods__: 31 | 32 | def not_implemented(*_, x=method_name, **__): 33 | raise NotImplementedError(f"Cannot invoke abstract class method {x}") 34 | 35 | not_implemented.__original_signature__ = inspect.signature( 36 | getattr(cls, method_name) 37 | ) 38 | setattr(cls, method_name, not_implemented) 39 | 40 | # Look for abstract class methods of parents 41 | base_abstract_class_methods = { 42 | method_name: getattr(base, method_name) 43 | for base in bases 44 | for method_name in getattr(base, "__abstract_class_methods__", []) 45 | if method_name not in cls.__abstract_class_methods__ 46 | } 47 | 48 | # If any parent abstract class methods have been implemented as instance 49 | # methods, raise an import-time exception 50 | for method_name, base_method in base_abstract_class_methods.items(): 51 | # A local implementation is valid if it is a bound method ( 52 | # implemented as a @classmethod) or it is a function with a 53 | # signature that exactly matches the signature of the base class 54 | # (implemented as @staticmethod). 55 | this_method = getattr(cls, method_name) 56 | is_classmethod = inspect.ismethod(this_method) 57 | original_signature = getattr(base_method, "__original_signature__", None) 58 | is_staticmethod = inspect.isfunction(this_method) and inspect.signature( 59 | this_method 60 | ) in [original_signature, inspect.signature(base_method)] 61 | if not (is_classmethod or is_staticmethod): 62 | raise NotImplementedError( 63 | f"The method [{method_name}] is an @classmethod @abstractmethod. " 64 | f"{cls} implements it as an instance method" 65 | ) 66 | 67 | 68 | class ABCStatic(metaclass=ABCStaticMeta): 69 | """An ABCStatic class is a child of abc.ABC which has support for enforcing 70 | methods which combine @classmethod and @abstractmethod 71 | """ 72 | -------------------------------------------------------------------------------- /oper8/x/utils/constants.py: -------------------------------------------------------------------------------- 1 | """ 2 | Shared constants across the various oper8.x tools 3 | """ 4 | 5 | # The key in the CR spec that holds the datastore information 6 | SPEC_DATASTORES = "datastores" 7 | 8 | # The key inside an individual spec.datastores entry 
that holds connection info 9 | SPEC_DATASTORE_CONNECTION = "connection" 10 | 11 | GLOBAL_SLOT = "global" 12 | 13 | DEPS_ANNOTATION = "oper8.org/dependency-hash" 14 | 15 | SERVICE_ACCOUNT_NAME_WORKLOAD_SCHEDULER = "workload-scheduler" 16 | 17 | PULL_POLICY = "IfNotPresent" 18 | -------------------------------------------------------------------------------- /oper8/x/utils/tls_context/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Common tls_context module setup 3 | """ 4 | 5 | # Local 6 | from .public import ( 7 | get_client_cert, 8 | get_server_key_cert_pair, 9 | request_server_key_cert_pair, 10 | ) 11 | 12 | default = {} 13 | -------------------------------------------------------------------------------- /oper8/x/utils/tls_context/factory.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module implements a factory for TlsContext implementations 3 | """ 4 | 5 | # Standard 6 | from typing import Optional, Type 7 | 8 | # First Party 9 | import alog 10 | 11 | # Local 12 | from .interface import ITlsContext 13 | from oper8 import Session, assert_config 14 | from oper8.utils import merge_configs 15 | 16 | log = alog.use_channel("TLSFY") 17 | 18 | 19 | ## Interface ################################################################### 20 | 21 | 22 | def get_tls_context( 23 | session: Session, 24 | config_overrides: Optional[dict] = None, 25 | ) -> ITlsContext: 26 | """Get an instance of the configured implementation of the tls context 27 | 28 | Args: 29 | session: Session 30 | The current deploy session 31 | config_overrides: Optional[dict] 32 | Optional runtime config values. These will overwrite any values 33 | pulled from the session.config 34 | 35 | Returns: 36 | tls_context: ITlsContext 37 | The constructed instance of the context 38 | """ 39 | return _TlsContextSingletonFactory.get_tls_context( 40 | session, 41 | config_overrides=config_overrides, 42 | ) 43 | 44 | 45 | def register_tls_context_type(context_class: Type[ITlsContext]): 46 | """Register a constructor for a given context implementation type 47 | 48 | Args: 49 | context_class: Type[ITlsContext] 50 | The ITlsContext child class to register 51 | """ 52 | _TlsContextSingletonFactory.register(context_class) 53 | 54 | 55 | ## Implementation Details ###################################################### 56 | 57 | 58 | class _TlsContextSingletonFactory: 59 | """The _TlsContextSingletonFactory will manage a singleton instance of an 60 | ITlsContext based on the session's config. 61 | """ 62 | 63 | # The section of the app_config that will hold the config 64 | _APP_CONFIG_SECTION = "tls" 65 | _CONFIG_TYPE_FIELD = "type" 66 | 67 | # Internal class dict holding the registered types 68 | _registered_types = {} 69 | 70 | # Singleton instance details 71 | _instance = None 72 | _instance_deploy_id = None 73 | 74 | ## Interface ############################################################### 75 | 76 | @classmethod 77 | def get_tls_context( 78 | cls, 79 | session: Session, 80 | config_overrides: Optional[dict] = None, 81 | ) -> ITlsContext: 82 | """Get an instance of the configured implementation of the tls context 83 | 84 | Args: 85 | session: Session 86 | The current deploy session 87 | config_overrides: Optional[dict] 88 | Optional runtime config values. 
These will overwrite any values 89 | pulled from the session.config 90 | 91 | Returns: 92 | tls_context: ITlsContext 93 | The constructed instance of the context 94 | """ 95 | # Check to see if this instance already exists 96 | if cls._instance is None or cls._instance_deploy_id != session.id: 97 | log.debug("Constructing TlsContext for [%s]", session.id) 98 | 99 | # Get the config 100 | tls_config = merge_configs( 101 | session.config.get(cls._APP_CONFIG_SECTION), 102 | config_overrides or {}, 103 | ) 104 | assert_config( 105 | tls_config is not None, 106 | f"Missing required config section: {cls._APP_CONFIG_SECTION}", 107 | ) 108 | type_label = tls_config.get(cls._CONFIG_TYPE_FIELD) 109 | type_class = cls._registered_types.get(type_label) 110 | assert_config( 111 | type_class is not None, 112 | f"Cannot construct unknown TlsContext type [{type_label}]", 113 | ) 114 | 115 | # Construct the instance 116 | cls._instance = type_class(session=session, config=tls_config) 117 | cls._instance_deploy_id = session.id 118 | 119 | log.debug2("Returning TlsContext for [%s]", session.id) 120 | return cls._instance 121 | 122 | @classmethod 123 | def register(cls, context_class: Type[ITlsContext]): 124 | """Register a constructor for a given context implementation type 125 | 126 | Args: 127 | context_class: Type[ITlsContext] 128 | The ITlsContext child class to register 129 | """ 130 | assert hasattr(context_class, ITlsContext._TYPE_LABEL_ATTRIBUTE), ( 131 | "All derived ITlsContext classes must have an attribute " 132 | f"{ITlsContext._TYPE_LABEL_ATTRIBUTE}" 133 | ) 134 | type_label = getattr(context_class, ITlsContext._TYPE_LABEL_ATTRIBUTE) 135 | if type_label in cls._registered_types: 136 | log.warning( 137 | "Received non-unique %s for %s: %s", 138 | ITlsContext._TYPE_LABEL_ATTRIBUTE, 139 | context_class, 140 | type_label, 141 | ) 142 | log.debug2("Registering tls context type [%s]", type_label) 143 | cls._registered_types[type_label] = context_class 144 | -------------------------------------------------------------------------------- /oper8/x/utils/tls_context/public.py: -------------------------------------------------------------------------------- 1 | """ 2 | This file holds functions that should be used outside of this module by 3 | components, subsystems, and applications that need access to the TLS context 4 | functionality. 5 | """ 6 | 7 | # Standard 8 | from typing import Tuple 9 | 10 | # Local 11 | from .factory import get_tls_context, register_tls_context_type 12 | from .internal import InternalTlsContext 13 | from oper8 import Session 14 | 15 | ## Type Registrations ########################################################## 16 | 17 | register_tls_context_type(InternalTlsContext) 18 | 19 | ## Public Functions ############################################################ 20 | 21 | 22 | def request_server_key_cert_pair( 23 | session: Session, 24 | *args, 25 | **kwargs, 26 | ) -> None: 27 | """Request creation of the PEM encoded value of the key/cert pair for a 28 | given server. This function has to be called before render_chart is 29 | called, i.e., during the parse_config / Component constructor phase. Implementations of 30 | this function will generate the pair (in background) if it has not been 31 | already requested. 
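
    A hedged sketch of the intended call pattern (passthrough args elided):

        # During parse_config / Component construction
        request_server_key_cert_pair(session, ...)
        # Later, once rendering is underway
        key_pem, cert_pem = get_server_key_cert_pair(session, ...)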
32 | 33 | Args: 34 | session: Session 35 | The current deploy session 36 | 37 | Passthrough Args: 38 | See ITlsContext.request_server_key_cert_pair 39 | """ 40 | return get_tls_context(session).request_server_key_cert_pair(*args, **kwargs) 41 | 42 | 43 | def get_server_key_cert_pair( 44 | session: Session, 45 | *args, 46 | **kwargs, 47 | ) -> Tuple[str, str]: 48 | """Get the previously requested PEM encoded value of the key/cert pair for a 49 | given server. Implementations will generate the pair if it does not exist 50 | and will fetch its content if it does. If the content is not available, an 51 | assertion is triggered. 52 | 53 | Args: 54 | session: Session 55 | The current deploy session 56 | 57 | Passthrough Args: 58 | See ITlsContext.get_server_key_cert_pair 59 | 60 | Returns: 61 | key_pem: str 62 | This is the pem-encoded key content (base64 encoded if 63 | encode is set) 64 | cert_pem: str 65 | This is the pem-encoded cert content (base64 encoded 66 | if encode is set) 67 | """ 68 | return get_tls_context(session).get_server_key_cert_pair(*args, **kwargs) 69 | 70 | 71 | def get_client_cert( 72 | session: Session, 73 | *args, 74 | **kwargs, 75 | ) -> str: 76 | """Get the CA's public cert 77 | 78 | Args: 79 | session: Session 80 | The current deploy session 81 | 82 | Passthrough Args: 83 | See ITlsContext.get_client_cert 84 | 85 | Returns: 86 | crt_pem: Optional[str] 87 | The pem-encoded cert (base64 encoded if encode set) 88 | """ 89 | return get_tls_context(session).get_client_cert(*args, **kwargs) 90 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=60", "setuptools-scm>=8.0"] 3 | 4 | [project] 5 | name = "oper8" 6 | dynamic = ["version"] 7 | description = "Python-native Kubernetes operator framework for managing trees of components" 8 | license = {text = "Apache-2.0"} 9 | readme = "README.md" 10 | requires-python = ">=3.9" 11 | classifiers=[ 12 | "License :: OSI Approved :: Apache Software License" 13 | ] 14 | 15 | dependencies = [ 16 | "alchemy-config>=1.0.0,<2", 17 | "alchemy-logging>=1.0.1,<2", 18 | "openshift>=0.13.1,<0.14", 19 | "python-dateutil>=2.8.1,<3", 20 | "deepdiff>=8.5,<9", 21 | "jsonpatch>=1.26,<2", 22 | "semver>=2.13.0,<4", 23 | "kubernetes>=25.3.0,<33", 24 | # Until https://github.com/libgit2/pygit2/issues/1292 is resolved 25 | "pygit2>=1.12.0,<1.18", 26 | ] 27 | 28 | [project.scripts] 29 | 30 | oper8 = "oper8.__main__:main" 31 | 32 | [project.optional-dependencies] 33 | 34 | ## User Extra Sets ## 35 | 36 | ansible = [ 37 | # This will install the k8s extension module needed to actually run 38 | "ansible~=2.9.0", 39 | ] 40 | 41 | test-helpers = [ 42 | "pytest>=6", 43 | ] 44 | 45 | tls-context = [ 46 | "cryptography>=41.0.4", 47 | ] 48 | 49 | # NOTE: This is "all" from the user perspective, not the dev perspective 50 | all = [ 51 | "oper8[ansible, test-helpers, tls-context]", 52 | ] 53 | 54 | ## Dev Extra Sets ## 55 | 56 | dev-test = [ 57 | "pytest>=6", 58 | "pytest-cov>=2.10.1", 59 | "pytest-timeout>=2.1.0,<3", 60 | "dill>=0.4,<0.5", 61 | "Flask>=2.3,<4", 62 | ] 63 | 64 | dev-docs = [ 65 | "mkdocs-material>=9.5.46", 66 | "mkdocstrings-python>=1.12.2" 67 | ] 68 | 69 | dev-fmt = [ 70 | "pre-commit>=3.0.4,<5.0", 71 | "ruff==0.11.13", 72 | ] 73 | 74 | dev-build = [ 75 | "setuptools>=60", 76 | "setuptools-scm>=8.0", 77 | ] 78 | 79 | # NOTE: This is "all" from the user and dev perspective 
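# (A hedged aside: consumers would typically pull these extras with pip, e.g.
# `pip install "oper8[all]"` for users or `pip install "oper8[all-dev]"` for
# contributors; the set below combines every extra defined above.)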
80 | all-dev = [ 81 | "oper8[all, dev-test, dev-docs, dev-fmt, dev-build]" 82 | ] 83 | 84 | [project.urls] 85 | Source = "https://github.com/IBM/oper8" 86 | 87 | 88 | [tool.setuptools.packages.find] 89 | where = [""] 90 | include = ["oper8"] 91 | 92 | [tool.setuptools_scm] 93 | write_to = "oper8/_version.py" 94 | 95 | [tool.pytest.ini_options] 96 | markers = [ 97 | "ansible: marks tests that depend on ansible", 98 | ] 99 | 100 | 101 | [tool.ruff] 102 | line-length = 100 103 | target-version = "py38" 104 | 105 | 106 | [tool.ruff.lint] 107 | select = [ "E", "F", "UP", "B", "SIM", "I"] 108 | ignore = [ 109 | "UP032", # f-string 110 | "UP034", # extraneous-parentheses 111 | # "UP035", # deprecated-import 112 | 113 | ## original errors from pylint 114 | "F403", # unable to detect undefined names 115 | "I001", # import block unsorted/unformatted 116 | "E402", # module level import not at top of file 117 | # "B028", # warnings: no explicit stacklevel keyword argument found 118 | # "I0001", # raw-checker-failed 119 | # "I0010", # bad-inline-option 120 | # "I0011", # locally-disabled 121 | # "I0013", # file-ignored 122 | # "I0020", # suppressed-message 123 | # "I0021", # useless-suppression 124 | # "I0022", # deprecated-pragma 125 | 126 | # "I0023", # use-symbolic-message-instead 127 | # "C0103", # invalid-name 128 | # "C0115", # missing-class-docstring 129 | # "C0114", # missing-module-docstring 130 | # "C0116", # missing-function-docstring 131 | # "C0209", # consider-using-f-string 132 | # "R1710", # inconsistent-return-statements 133 | # "E1101", # no-member 134 | # "R0913", # too-many-arguments 135 | # "R0914", # too-many-locals 136 | # "R0912", # too-many-branches 137 | # "R0915", # too-many-statements 138 | # "R0401", # cyclic-import 139 | # "R0903", # too-few-public-methods 140 | # "W0212", # protected-access 141 | # "W0511", # fixme 142 | # "W1202", # logging-format-interpolation 143 | # "E1205", # logging-too-many-args 144 | # "W0201", # attribute-defined-outside-init 145 | # "W0223", # abstract-method 146 | # "W0104", # pointless-statement 147 | # "C0411", # wrong-import-order 148 | ] 149 | 150 | [tool.ruff.lint.per-file-ignores] 151 | "__init__.py" = [ 152 | "F401", # imported but unused 153 | "F403" # unable to detect undefined names 154 | ] 155 | "oper8/watch_manager/ansible_watch_manager/modules/k8s_application.py" = [ 156 | # Ansible requires UTF-8 encoding declaration for Ansible modules 157 | # https://docs.ansible.com/ansible/latest/dev_guide/developing_modules_documenting.html#python-shebang-utf-8-coding 158 | "UP009" # UTF-8 encoding declaration is unnecessary 159 | ] 160 | -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": ["config:recommended"] 4 | } 5 | -------------------------------------------------------------------------------- /scripts/check_heartbeat.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ################################################################################ 4 | # This utility script can be used as a kubernetes liveness/readiness probe when 5 | # using the python watch manager. A sample pod configuration looks like the 6 | # following: 7 | # 8 | # spec: 9 | # ... 10 | # containers: 11 | # - name: operator 12 | # ... 
13 | # env: 14 | # - name: WATCH_MANAGER 15 | # value: python 16 | # - name: PYTHON_WATCH_MANAGER_HEARTBEAT_FILE 17 | # value: /tmp/heartbeat.txt 18 | # livenessProbe: 19 | # exec: 20 | # command: 21 | # - check_heartbeat.sh 22 | # - /tmp/heartbeat.txt 23 | # - "120" 24 | # readinessProbe: 25 | # exec: 26 | # command: 27 | # - check_heartbeat.sh 28 | # - /tmp/heartbeat.txt 29 | # - "60" 30 | ################################################################################ 31 | 32 | if [ "$#" -lt "2" ] 33 | then 34 | echo "Usage: $0 <heartbeat_file> <delta_seconds>" 35 | exit 1 36 | fi 37 | 38 | heartbeat_file=$1 39 | delta=$2 40 | 41 | stamp=$(date -d "$(cat $heartbeat_file)" +%s) 42 | test $(expr "$stamp" + "$delta") -gt $(date +%s) 43 | -------------------------------------------------------------------------------- /scripts/document.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | BASE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" 5 | cd "$BASE_DIR" 6 | 7 | mkdocs_opt=("$@") 8 | 9 | if [[ ${#mkdocs_opt[@]} -eq 0 ]]; then 10 | echo "No options provided. Running 'mkdocs build'..." 11 | mkdocs build 12 | exit 0 13 | fi 14 | 15 | # Relax 'set -e' because serve will be aborted with ctrl C. 16 | if [[ "${mkdocs_opt[0]}" == "serve" ]]; then 17 | set +e 18 | echo "Serving the documentation. Abort with ctrl C." 19 | fi 20 | 21 | echo "Running 'mkdocs ${mkdocs_opt[@]}'..." 22 | mkdocs "${mkdocs_opt[@]}" 23 | -------------------------------------------------------------------------------- /scripts/fmt.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | pre-commit run --all-files 4 | RETURN_CODE=$? 5 | 6 | function echoWarning() { 7 | LIGHT_YELLOW='\033[1;33m' 8 | NC='\033[0m' # No Color 9 | echo -e "${LIGHT_YELLOW}${1}${NC}" 10 | } 11 | 12 | if [ "$RETURN_CODE" -ne 0 ]; then 13 | if [ "${CI}" != "true" ]; then 14 | echoWarning "☝️ This appears to have failed, but actually your files have been formatted." 15 | echoWarning "Make a new commit with these changes before making a pull request." 16 | else 17 | echoWarning "This test failed because your code isn't formatted correctly." 18 | echoWarning 'Locally, run `make run fmt`, it will appear to fail, but change files.' 19 | echoWarning "Add the changed files to your commit and this stage will pass." 20 | fi 21 | 22 | exit $RETURN_CODE 23 | fi 24 | -------------------------------------------------------------------------------- /scripts/lint.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | cd $(dirname ${BASH_SOURCE[0]})/.. 4 | 5 | fnames="" 6 | for fname in $@ 7 | do 8 | if [[ "$fname" == *".py" ]] || [ -d $fname ] && [[ "$fname" == "oper8"* ]] 9 | then 10 | fnames="$fnames $fname" 11 | else 12 | echo "Ignoring non-library file: $fname" 13 | fi 14 | done 15 | if [ "$fnames" == "" ] 16 | then 17 | fnames="oper8" 18 | fi 19 | 20 | ruff check $arg $fnames 21 | -------------------------------------------------------------------------------- /scripts/run_tests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | BASE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." 
&& pwd)" 5 | cd "$BASE_DIR" 6 | 7 | pytest_opts=("$@") 8 | allow_warnings=${ALLOW_WARNINGS:-"0"} 9 | if [ "$allow_warnings" = "1" ]; then 10 | warn_arg="" 11 | else 12 | # NOTE: The AnsibleWatchManager raises an ImportWarning when ansible imports 13 | # systemd under the hood 14 | warn_arg="-W error -W ignore::ImportWarning" 15 | 16 | # If running with 3.12 or later, some of the dependencies use deprecated 17 | # functionality 18 | if [ "$(python --version | cut -d' ' -f 2 | cut -d'.' -f 2)" -gt "11" ]; then 19 | warn_arg="$warn_arg -W ignore::DeprecationWarning" 20 | fi 21 | fi 22 | 23 | # Show the test coverage when running the whole test, otherwise omit. 24 | if [[ "${pytest_opts[*]}" != *"tests/"* ]]; then 25 | pytest_opts+=( 26 | --cov-config=.coveragerc 27 | --cov=oper8 28 | --cov-report=term 29 | --cov-report=html 30 | --cov-fail-under=85.00 31 | ) 32 | fi 33 | 34 | PYTHONPATH="${BASE_DIR}:$PYTHONPATH" python3 -m pytest \ 35 | $warn_arg "${pytest_opts[@]}" 36 | -------------------------------------------------------------------------------- /setup_requirements.txt: -------------------------------------------------------------------------------- 1 | tox>=4.4.2,<5 2 | build>=0.10.0,<2.0 3 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IBM/oper8/63d30d3d38147283ba9621c625aa83f4461ead22/tests/__init__.py -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | """ 2 | Shared test config 3 | """ 4 | # Standard 5 | from unittest import mock 6 | import sys 7 | 8 | # Third Party 9 | import pytest 10 | 11 | # Local 12 | from oper8.reconcile import ReconcileManager 13 | from oper8.test_helpers.helpers import ( 14 | config_detail_dict, 15 | configure_logging, 16 | version_safe_md5, 17 | ) 18 | 19 | configure_logging() 20 | 21 | 22 | @pytest.fixture(autouse=True) 23 | def no_local_kubeconfig(): 24 | """This fixture makes sure the tests run as if KUBECONFIG is not exported in 25 | the environment, even if it is 26 | """ 27 | with mock.patch( 28 | "kubernetes.config.new_client_from_config", side_effect=RuntimeError 29 | ): 30 | yield 31 | 32 | 33 | @pytest.fixture(autouse=True) 34 | def fork_multiprocess(): 35 | """This fixture makes sure that we use fork for multiprocessing instead of 36 | spawn. Spawn is more reliable with FIPs and OpenSSL but causes issues when 37 | pickling mocked or patched objects. 38 | """ 39 | # Don't use library_config since some tests read the config object directly 40 | config_detail_dict.python_watch_manager.process_context = "fork" 41 | 42 | 43 | @pytest.fixture(autouse=True) 44 | def no_unimport_oper8_mods(): 45 | """Since our helper classes themselves live within the top of oper8, the 46 | real logic for unimporting the controller modules will recursively reimport 47 | _all_ oper8.* modules. This causes a lot of problems with tests like 48 | misbehaving mocks, Controller is not Controller, etc... 
49 | """ 50 | real_unimport = ReconcileManager._unimport_controller_module 51 | 52 | @staticmethod 53 | def _patched_unimport(module_name): 54 | oper8_mods = { 55 | mod_name: mod 56 | for mod_name, mod in sys.modules.items() 57 | if mod_name.startswith("oper8.") 58 | and not mod_name.startswith("oper8.test_helpers") 59 | } 60 | reimport_modules = real_unimport(module_name) 61 | for mod_name, mod in oper8_mods.items(): 62 | sys.modules.setdefault(mod_name, mod) 63 | return reimport_modules 64 | 65 | with mock.patch( 66 | "oper8.reconcile.ReconcileManager._unimport_controller_module", 67 | new=_patched_unimport, 68 | ): 69 | yield 70 | -------------------------------------------------------------------------------- /tests/dag/test_completion_state.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test the DAG CompletionState class 3 | """ 4 | 5 | # First Party 6 | import alog 7 | 8 | # Local 9 | from oper8.dag import CompletionState, Node 10 | 11 | log = alog.use_channel("TEST") 12 | 13 | ################################################################################ 14 | ## Completion State ####################################################################### 15 | ################################################################################ 16 | 17 | 18 | def test_dag_completion_state_str(): 19 | """Coverage test to ensure that stringifying a CompletionState doesn't 20 | throw! Coverage :) 21 | """ 22 | log.debug( 23 | str( 24 | CompletionState( 25 | verified_nodes=[Node("A")], 26 | unverified_nodes=[Node("B")], 27 | failed_nodes=[Node("C")], 28 | unstarted_nodes=[Node("D")], 29 | ) 30 | ) 31 | ) 32 | -------------------------------------------------------------------------------- /tests/dag/test_graph.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test the DAG Graph Class functionality 3 | """ 4 | # Third Party 5 | import pytest 6 | 7 | # Local 8 | from oper8.dag import Graph, Node 9 | 10 | ################################################################################ 11 | ## Graph Tests ############################################################# 12 | ################################################################################ 13 | 14 | 15 | def test_creation(): 16 | """Test node initialization""" 17 | graph = Graph() 18 | assert graph.root 19 | 20 | 21 | def test_add_node(): 22 | """Test node children functions""" 23 | graph = Graph() 24 | node_a = Node("a") 25 | graph.add_node(node_a) 26 | assert graph.root.has_child(node_a) 27 | 28 | 29 | def test_add_node_exception(): 30 | """Test node children functions""" 31 | graph = Graph() 32 | with pytest.raises(ValueError): 33 | graph.add_node(Node(None)) 34 | 35 | graph.add_node(Node("a")) 36 | with pytest.raises(ValueError): 37 | graph.add_node(Node("a")) 38 | 39 | 40 | def test_add_node_dependency(): 41 | """Test node children functions""" 42 | graph = Graph() 43 | node_a = Node("a") 44 | node_b = Node("b") 45 | node_c = Node("c") 46 | node_d = Node("d") 47 | graph.add_node(node_a) 48 | graph.add_node(node_b) 49 | graph.add_node(node_c) 50 | graph.add_node(node_d) 51 | graph.add_node_dependency(node_a, node_b) 52 | graph.add_node_dependency(node_a, node_c, "testdata") 53 | graph.add_node_dependency(node_b, node_c) 54 | graph.add_node_dependency(node_c, node_d) 55 | 56 | assert node_a.get_children() == [(node_b, None), (node_c, "testdata")] 57 | 58 | 59 | def test_add_node_dependency_exception(): 60 | """Test node children 
functions""" 61 | graph = Graph() 62 | node_a = Node("a") 63 | node_b = Node("b") 64 | graph.add_node(node_a) 65 | graph.add_node(node_b) 66 | 67 | with pytest.raises(ValueError): 68 | graph.add_node_dependency(node_a, Node("c")) 69 | with pytest.raises(ValueError): 70 | graph.add_node_dependency(Node("c"), node_a) 71 | 72 | 73 | def test_accessors(): 74 | """Test node children functions""" 75 | graph = Graph() 76 | node_a = Node("a") 77 | node_b = Node("b") 78 | graph.add_node(node_a) 79 | graph.add_node(node_b) 80 | 81 | assert graph.get_node("a") == node_a 82 | assert graph.get_all_nodes() == [node_a, node_b] 83 | assert graph.has_node(node_a) 84 | assert node_a in graph 85 | assert not graph.has_node(Node("c")) 86 | 87 | 88 | def test_topology(): 89 | graph = Graph() 90 | node_a = Node("a") 91 | node_b = Node("b") 92 | graph.add_node(node_a) 93 | graph.add_node(node_b) 94 | graph.add_node_dependency(node_b, node_a) 95 | 96 | assert graph.topology() == [node_a, node_b] 97 | 98 | 99 | def test_topology_ordering(): 100 | """Test node topology function""" 101 | graph = Graph() 102 | node_a = Node("a") 103 | node_b = Node("b") 104 | node_c = Node("c") 105 | graph.add_node(node_c) 106 | graph.add_node(node_b) 107 | graph.add_node(node_a) 108 | 109 | assert graph.topology() == [node_a, node_b, node_c] 110 | 111 | 112 | def test_str(): 113 | graph = Graph() 114 | node_a = Node("a") 115 | node_b = Node("b") 116 | graph.add_node(node_a) 117 | graph.add_node(node_b) 118 | graph.add_node_dependency(node_a, node_b) 119 | 120 | assert str(graph) == "Graph({a:[b],b:[]})" 121 | -------------------------------------------------------------------------------- /tests/dag/test_node.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test the Node and ResourceNode classes functionality 3 | """ 4 | 5 | # Third Party 6 | import pytest 7 | 8 | # Local 9 | from oper8.dag import Node, ResourceNode 10 | 11 | ################################################################################ 12 | ## Node Tests ############################################################# 13 | ################################################################################ 14 | 15 | 16 | def test_node_creation(): 17 | """Test node initialization""" 18 | node = Node() 19 | assert node.get_name() == None 20 | assert node.get_data() == None 21 | node = Node("test", "data") 22 | assert node.get_name() == "test" 23 | assert node.get_data() == "data" 24 | 25 | 26 | def test_node_children(): 27 | """Test node children functions""" 28 | node_a = Node("a") 29 | node_b = Node("b") 30 | node_c = Node("c") 31 | node_a.add_child(node_b) 32 | node_a.add_child(node_c, "testdata") 33 | 34 | assert node_a.has_child(node_b) 35 | assert node_a.get_children() == [(node_b, None), (node_c, "testdata")] 36 | 37 | node_a.remove_child(node_b) 38 | assert not node_a.has_child(node_b) 39 | assert node_a.get_children() == [(node_c, "testdata")] 40 | 41 | 42 | def test_node_cyclic_dependency(): 43 | """Test node children functions""" 44 | node_a = Node("a") 45 | node_b = Node("b") 46 | node_a.add_child(node_b) 47 | with pytest.raises(ValueError): 48 | node_b.add_child(node_a) 49 | 50 | 51 | def test_node_topology(): 52 | """Test node topology function""" 53 | 54 | # Graph 55 | # a->b->c 56 | node_a = Node("a") 57 | node_b = Node("b") 58 | node_c = Node("c") 59 | node_a.add_child(node_b) 60 | node_b.add_child(node_c) 61 | 62 | assert node_a.topology() == [node_c, node_b, node_a] 63 | assert node_b.topology() == 
[node_c, node_b] 64 | assert node_c.topology() == [node_c] 65 | 66 | 67 | def test_node_dfs(): 68 | """Test node dfs search""" 69 | 70 | # Graph 71 | # a 72 | # / \ 73 | # b c 74 | # | 75 | # d 76 | node_a = Node("a") 77 | node_b = Node("b") 78 | node_c = Node("c") 79 | node_d = Node("d") 80 | node_a.add_child(node_b) 81 | node_a.add_child(node_c) 82 | node_b.add_child(node_d) 83 | 84 | # node a can reach all children 85 | assert node_a.dfs(node_b) and node_a.dfs(node_c) and node_a.dfs(node_d) 86 | # node b can only reach d and not c 87 | assert node_b.dfs(node_d) and not node_b.dfs(node_c) 88 | # node_d can't reach anything except itself 89 | assert ( 90 | node_d.dfs(node_d) 91 | and not node_d.dfs(node_b) 92 | and not node_d.dfs(node_c) 93 | and not node_d.dfs(node_a) 94 | ) 95 | 96 | 97 | def test_node_equality(): 98 | # Check equality 99 | node_a = Node("a") 100 | assert node_a != Node("b") 101 | assert node_a == Node("a") 102 | assert node_a != "arandomtype" 103 | 104 | # Check sorting 105 | assert node_a < Node("b") 106 | assert not Node("z") < node_a 107 | assert not Node("z") < "arandomtype" 108 | 109 | 110 | def test_node_descriptors(): 111 | assert hash(Node("a")) == hash("a") 112 | assert str(Node("a")) == "Node('a', None)" 113 | 114 | 115 | ################################################################################ 116 | ## Resource Node Tests ############################################################# 117 | ################################################################################ 118 | 119 | 120 | def create_dummy_kube_resource(): 121 | return { 122 | "kind": "Foo", 123 | "apiVersion": "foo.bar/v1", 124 | "metadata": { 125 | "name": "foo", 126 | "namespace": "default", 127 | }, 128 | } 129 | 130 | 131 | def test_resource_node_attributes(): 132 | node = ResourceNode("a", create_dummy_kube_resource()) 133 | assert node.api_group == "foo.bar" 134 | assert node.api_version == "foo.bar/v1" 135 | assert node.kind == "Foo" 136 | assert node.metadata == {"name": "foo", "namespace": "default"} 137 | assert node.name == "foo" 138 | 139 | 140 | def test_resource_node_add_dependency(): 141 | node_a = ResourceNode("a", create_dummy_kube_resource()) 142 | node_b = ResourceNode("b", create_dummy_kube_resource()) 143 | node_a.add_dependency(node_b) 144 | assert node_a.has_child(node_b) 145 | assert node_a.get_children() == [(node_b, None)] 146 | -------------------------------------------------------------------------------- /tests/deploy_manager/test_replace_utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tests for the replace_utils functionality 3 | """ 4 | 5 | # Standard 6 | 7 | # Third Party 8 | import pytest 9 | 10 | # First Party 11 | import alog 12 | 13 | # Local 14 | from oper8.deploy_manager.replace_utils import _REPLACE_FUNCS, requires_replace 15 | 16 | ## Helpers ##################################################################### 17 | 18 | log = alog.use_channel("TEST") 19 | 20 | 21 | def sample_object(): 22 | return { 23 | "original_value": "original", 24 | "envs": [ 25 | { 26 | "name": "first", 27 | "value": "True", 28 | }, 29 | { 30 | "name": "second", 31 | "valueFrom": "False", 32 | }, 33 | ], 34 | "dicts_in_lists": [ 35 | {"someDict": {"someValue": "onetwo", "other": "threefour"}}, 36 | ], 37 | "list": [ 38 | {"name": "container1"}, 39 | {"name": "container2"}, 40 | ], 41 | } 42 | 43 | 44 | ## Replace functions ################################################################## 45 | 46 | 47 | 
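# A hedged summary of the semantics exercised below: requires_replace(current,
# desired) should return True when a change cannot be expressed as a strategic
# merge patch -- e.g. flipping an env var between "value" and "valueFrom",
# shrinking a list, or renaming a list entry -- and False for purely additive
# or value-only changes, which the patch path can handle.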
@pytest.mark.parametrize( 48 | ["desired_obj"], 49 | [ 50 | [ 51 | { 52 | "envs": [ 53 | { 54 | "name": "first", 55 | "valueFrom": "True", 56 | }, 57 | { 58 | "name": "second", 59 | "valueFrom": "False", 60 | }, 61 | ], 62 | } 63 | ], 64 | [ 65 | { 66 | "envs": [ 67 | { 68 | "name": "first", 69 | "value": "True", 70 | }, 71 | { 72 | "name": "second", 73 | "value": "True", 74 | }, 75 | ], 76 | } 77 | ], 78 | ], 79 | ) 80 | def test_value_operations(desired_obj): 81 | """Test that switching env entries between value and valueFrom requires a replace""" 82 | current_obj = sample_object() 83 | assert requires_replace(current_obj, desired_obj) 84 | # Ensure each replace function is still able to be called 85 | for func in _REPLACE_FUNCS: 86 | func(current_obj, desired_obj) 87 | 88 | 89 | @pytest.mark.parametrize( 90 | ["desired_obj", "requires"], 91 | [ 92 | [ 93 | { 94 | "list": [ 95 | {"name": "container1"}, 96 | {"name": "container2"}, 97 | {"name": "container3"}, 98 | ], 99 | }, 100 | False, 101 | ], 102 | [ 103 | { 104 | "dicts_in_lists": [ 105 | {"someDict": {"someValue": "onetwo", "other": "threefour"}}, 106 | ], 107 | }, 108 | False, 109 | ], 110 | [ 111 | { 112 | "dicts_in_lists": [ 113 | {"someDict": {"someValue": "onetwo", "other": "threefour"}}, 114 | {"appendedDict": {"someValue": "onetwo", "other": "threefour"}}, 115 | ], 116 | }, 117 | False, 118 | ], 119 | [ 120 | { 121 | "list": [ 122 | {"name": "container1"}, 123 | ], 124 | }, 125 | True, 126 | ], 127 | [ 128 | { 129 | "list": [ 130 | {"name": "container1"}, 131 | {"name": "container_changed"}, 132 | ], 133 | }, 134 | True, 135 | ], 136 | [ 137 | { 138 | "list": [ 139 | {"name": "container1"}, 140 | {"name": "container4"}, 141 | ], 142 | }, 143 | True, 144 | ], 145 | ], 146 | ) 147 | def test_list_operations(desired_obj, requires) -> None: 148 | """Test which list modifications require a replace operation""" 149 | current_obj = sample_object() 150 | assert requires_replace(current_obj, desired_obj) == requires 151 | # Ensure each replace function is still able to be called 152 | for func in _REPLACE_FUNCS: 153 | func(current_obj, desired_obj) 154 | 155 | 156 | ## Patch functions ################################################################## 157 | 158 | 159 | @pytest.mark.parametrize( 160 | ["desired_obj"], 161 | [ 162 | [{"new_value": "patched"}], 163 | [{"added_list": [{"new_value"}]}], 164 | [{"original_value": "patched"}], 165 | ], 166 | ) 167 | def test_patch_operations(desired_obj): 168 | current_obj = sample_object() 169 | assert not requires_replace(current_obj, desired_obj) 170 | -------------------------------------------------------------------------------- /tests/test_decorator.py: -------------------------------------------------------------------------------- 1 | # Standard 2 | from unittest.mock import Mock 3 | 4 | # Third Party 5 | import pytest 6 | 7 | # Local 8 | from oper8 import Component, Controller, component, controller 9 | from oper8.test_helpers.helpers import library_config, setup_cr, setup_session 10 | 11 | ## @component ################################################################## 12 | 13 | mock_verify = Mock() 14 | 15 | 16 | @component(name="pure_comp") 17 | class PureComponent(Component): 18 | def __init__(self, session): 19 | super().__init__(session=session) 20 | self.add_resource( 21 | "test_obj", 22 | { 23 | "kind": "test", 24 | "apiVersion": "v1", 25 | "metadata": {"name": session.get_scoped_name("test_obj")}, 26 | }, 27 | ) 28 | 29 | # Attach the verify mock 30 | self.verify = 
mock_verify.method 31 | 32 | 33 | def test_component_creation(): 34 | """Ensure that decorated components are created correctly""" 35 | 36 | session = setup_session() 37 | 38 | # Pure components are a Component 39 | comp = PureComponent(session) 40 | assert isinstance(comp, Component) 41 | 42 | # Pure components can be built 43 | config = comp.to_config(session) 44 | assert config[0].metadata.name == session.get_scoped_name("test_obj") 45 | 46 | # Pure components have a class attribute "name" 47 | assert hasattr(comp.__class__, "name") 48 | 49 | 50 | ## @controller ################################################################# 51 | 52 | setup_components_mock = Mock() 53 | 54 | 55 | @controller( 56 | group="foo.bar", 57 | version="v1", 58 | kind="Foo", 59 | ) 60 | class FooController(Controller): 61 | def setup_components(self, *_, **__): 62 | setup_components_mock() 63 | 64 | 65 | def test_controller_creation(): 66 | """Make sure that an @controller decorated class can be created""" 67 | 68 | # Set up the controller 69 | with library_config(dry_run=True): 70 | ctrlr = FooController() 71 | assert isinstance(ctrlr, Controller) 72 | 73 | # Check class properties 74 | assert ctrlr.__class__.group == "foo.bar" 75 | assert ctrlr.__class__.version == "v1" 76 | assert ctrlr.__class__.kind == "Foo" 77 | 78 | # Roll out 79 | ctrlr.run_reconcile(setup_session()) 80 | 81 | 82 | def test_controller_extra_properties(): 83 | """Make sure that the extra_properties argument to @controller properly 84 | attaches the given properties to the Controller class 85 | """ 86 | 87 | @controller( 88 | group="foo.bar", 89 | version="v2", 90 | kind="Bar", 91 | extra_properties={"foo": "bar"}, 92 | ) 93 | class BarController(Controller): 94 | def setup_components(self, *_, **__): 95 | setup_components_mock() 96 | 97 | assert hasattr(BarController, "foo") 98 | assert BarController.foo == "bar" 99 | 100 | 101 | def test_controller_finalizer(): 102 | """Make sure that the finalizer argument to @controller properly 103 | attaches the given finalizer to the Controller class 104 | """ 105 | 106 | @controller( 107 | group="foo.bar", 108 | version="v2", 109 | kind="Bar", 110 | finalizer="test", 111 | ) 112 | class BarController(Controller): 113 | def setup_components(self, *_, **__): 114 | setup_components_mock() 115 | 116 | assert BarController.finalizer == "test" 117 | -------------------------------------------------------------------------------- /tests/test_exceptions.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test the custom assert functions 3 | """ 4 | 5 | # Third Party 6 | import pytest 7 | 8 | # Local 9 | from oper8 import exceptions 10 | 11 | 12 | def test_assert_precondition_pass(): 13 | """Make sure that no exception is thrown by assert_precondition when it 14 | passes 15 | """ 16 | exceptions.assert_precondition(True) 17 | 18 | 19 | def test_assert_precondition_fail(): 20 | """Make sure the right exception is thrown by assert_precondition when it 21 | fails 22 | """ 23 | exception_msg = "error message" 24 | with pytest.raises(exceptions.PreconditionError, match=exception_msg): 25 | exceptions.assert_precondition(False, exception_msg) 26 | 27 | 28 | def test_assert_config_pass(): 29 | """Make sure that no exception is thrown by assert_config when it 30 | passes 31 | """ 32 | exceptions.assert_config(True) 33 | 34 | 35 | def test_assert_config_fail(): 36 | """Make sure the right exception is thrown by assert_config when it 37 | fails 38 | """ 
35 | def test_assert_config_fail(): 36 | """Make sure the right exception is thrown by assert_config when it 37 | fails 38 | """ 39 | exception_msg = "error message" 40 | with pytest.raises(exceptions.ConfigError, match=exception_msg): 41 | exceptions.assert_config(False, exception_msg) 42 | 43 | 44 | def test_assert_cluster_pass(): 45 | """Make sure that no exception is thrown by assert_cluster when it 46 | passes 47 | """ 48 | exceptions.assert_cluster(True) 49 | 50 | 51 | def test_assert_cluster_fail(): 52 | """Make sure the right exception is thrown by assert_cluster when it 53 | fails 54 | """ 55 | exception_msg = "error message" 56 | with pytest.raises(exceptions.ClusterError, match=exception_msg): 57 | exceptions.assert_cluster(False, exception_msg) 58 | 59 | 60 | def test_exception_derived_from_base(): 61 | """Make sure the derived classes are instances of the base Oper8Error class""" 62 | fatal_error = exceptions.Oper8FatalError() 63 | assert isinstance(fatal_error, exceptions.Oper8Error) 64 | expected_error = exceptions.Oper8ExpectedError() 65 | assert isinstance(expected_error, exceptions.Oper8Error) 66 | 67 | 68 | def test_cluster_is_fatal(): 69 | """Make sure the cluster error is considered a fatal error""" 70 | with pytest.raises(exceptions.ClusterError) as cluster_error: 71 | exceptions.assert_cluster(False) 72 | assert isinstance(cluster_error.value, exceptions.Oper8FatalError) 73 | assert cluster_error.value.is_fatal_error 74 | 75 | 76 | def test_config_is_fatal(): 77 | """Make sure the config error is considered a fatal error""" 78 | with pytest.raises(exceptions.ConfigError) as config_error: 79 | exceptions.assert_config(False) 80 | assert isinstance(config_error.value, exceptions.Oper8FatalError) 81 | assert config_error.value.is_fatal_error 82 | 83 | 84 | def test_rollout_is_fatal(): 85 | """Make sure the rollout error is considered a fatal error""" 86 | with pytest.raises(exceptions.RolloutError) as rollout_error: 87 | raise exceptions.RolloutError() 88 | assert isinstance(rollout_error.value, exceptions.Oper8FatalError) 89 | assert rollout_error.value.is_fatal_error 90 | 91 | 92 | def test_precondition_is_non_fatal(): 93 | """Make sure the expected errors are not setting the fatal error flag""" 94 | with pytest.raises(exceptions.PreconditionError) as precondition_error: 95 | exceptions.assert_precondition(False) 96 | assert not precondition_error.value.is_fatal_error 97 | assert not isinstance(precondition_error.value, exceptions.Oper8FatalError) 98 | assert isinstance(precondition_error.value, exceptions.Oper8ExpectedError) 99 | 100 | 101 | def test_verification_is_non_fatal(): 102 | """Make sure the expected errors are not setting the fatal error flag""" 103 | msg = "Some verification message" 104 | with pytest.raises(exceptions.VerificationError, match=msg) as verification_error: 105 | exceptions.assert_verified(False, msg) 106 | assert not verification_error.value.is_fatal_error 107 | assert not isinstance(verification_error.value, exceptions.Oper8FatalError) 108 | assert isinstance(verification_error.value, exceptions.Oper8ExpectedError) 109 | -------------------------------------------------------------------------------- /tests/watch_manager/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IBM/oper8/63d30d3d38147283ba9621c625aa83f4461ead22/tests/watch_manager/__init__.py -------------------------------------------------------------------------------- /tests/watch_manager/ansible_watch_manager/modules/test_log_rotator.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test the 
implementation of the log_rotator open_iteration function 3 | """ 4 | 5 | # Standard 6 | import json 7 | import os 8 | import shlex 9 | import subprocess 10 | import sys 11 | import tempfile 12 | 13 | # First Party 14 | import alog 15 | 16 | # Local 17 | from oper8.test_helpers.helpers import configure_logging 18 | 19 | configure_logging() 20 | log = alog.use_channel("TEST") 21 | 22 | 23 | def strip_indentation(script_string): 24 | # Strip off "extra" indentation. This allows the tests to not look TERRIBLE 25 | lines = script_string.split("\n") 26 | first_line = [l for l in lines if l.strip()][0] 27 | indentation = first_line[: len(first_line) - len(first_line.lstrip())] 28 | if all([l.startswith(indentation) or not l for l in lines]): 29 | lines = [l[len(indentation) :] for l in lines] 30 | return "\n".join(lines) 31 | 32 | 33 | def run_test_script(script_string): 34 | """Since the goal of the rotator is to perform log rotation over a series of 35 | successive python process executions, we use this helper to launch 36 | subprocesses 37 | """ 38 | 39 | script = strip_indentation(script_string) 40 | with tempfile.NamedTemporaryFile(mode="w") as f_handle: 41 | # Save the script 42 | f_handle.write(script) 43 | f_handle.flush() 44 | 45 | # Run it 46 | assert os.path.exists(f_handle.name) 47 | cmd = f"{sys.executable} {f_handle.name}" 48 | with alog.ContextTimer(log.info, "Subprocess runtime: "): 49 | subprocess.run(shlex.split(cmd)) 50 | 51 | 52 | def parse_log_files(workdir): 53 | out = {} 54 | for fname in os.listdir(workdir): 55 | fpath = os.path.join(workdir, fname) 56 | with open(fpath, "r") as log_handle: 57 | out[fname] = json.load(log_handle) 58 | return out 59 | 60 | 61 | def test_alog_configure(): 62 | """Test that a log file is created when using the AutoRotatingFileHandler 63 | with alog.configure 64 | """ 65 | with tempfile.TemporaryDirectory() as workdir: 66 | log_file_name = "foo.log" 67 | 68 | # Define the "doit" script which will open the log file for rotation 69 | def get_script(i): 70 | return f""" 71 | import alog 72 | from oper8.watch_manager.ansible_watch_manager.modules.log_rotator import AutoRotatingFileHandler 73 | alog.configure( 74 | "info", 75 | formatter="json", 76 | handler_generator=lambda: AutoRotatingFileHandler( 77 | '{os.path.join(workdir, log_file_name)}', 78 | backupCount=1, 79 | ), 80 | ) 81 | ch = alog.use_channel("TEST") 82 | ch.info("TEST {i}") 83 | """ 84 | 85 | # Run the script the first time 86 | run_test_script(get_script(1)) 87 | all_files = parse_log_files(workdir) 88 | assert len(all_files) == 1 89 | assert log_file_name in all_files 90 | assert all_files[log_file_name]["message"] == "TEST 1" 91 | 92 | # Run it a second time and make sure it keeps one backup 93 | run_test_script(get_script(2)) 94 | all_files = parse_log_files(workdir) 95 | assert len(all_files) == 2 96 | assert log_file_name in all_files 97 | assert all_files[log_file_name]["message"] == "TEST 2" 98 | assert all_files[f"{log_file_name}.1"]["message"] == "TEST 1" 99 | 100 | # Run it a third time and make sure it keeps only one backup 101 | run_test_script(get_script(3)) 102 | all_files = parse_log_files(workdir) 103 | assert len(all_files) == 2 104 | assert log_file_name in all_files 105 | assert all_files[log_file_name]["message"] == "TEST 3" 106 | assert all_files[f"{log_file_name}.1"]["message"] == "TEST 2" 107 | -------------------------------------------------------------------------------- /tests/watch_manager/python_watch_manager/filters/test_common_filters.py: 
-------------------------------------------------------------------------------- 1 | """ 2 | Tests for the common filter functions 3 | """ 4 | # Third Party 5 | import pytest 6 | 7 | # First Party 8 | import alog 9 | 10 | # Local 11 | from oper8.exceptions import ConfigError 12 | from oper8.watch_manager.python_watch_manager.filters.common import import_filter 13 | from oper8.watch_manager.python_watch_manager.filters.filters import EnableFilter 14 | 15 | ## Helpers ##################################################################### 16 | 17 | 18 | def test_import_filter(): 19 | assert ( 20 | import_filter( 21 | "oper8.watch_manager.python_watch_manager.filters.filters.EnableFilter" 22 | ) 23 | == EnableFilter 24 | ) 25 | 26 | 27 | def test_import_filter_fail(): 28 | with pytest.raises(ConfigError): 29 | import_filter( 30 | "oper8.watch_manager.python_watch_manager.filters.filters.DoesNotExist" 31 | ) 32 | -------------------------------------------------------------------------------- /tests/watch_manager/python_watch_manager/filters/test_manager.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tests for the FilterManager 3 | """ 4 | # Third Party 5 | import pytest 6 | 7 | # First Party 8 | import alog 9 | 10 | # Local 11 | from oper8.deploy_manager.kube_event import KubeEventType 12 | from oper8.test_helpers.pwm_helpers import make_managed_object 13 | from oper8.watch_manager.python_watch_manager.filters.filters import ( 14 | CreationDeletionFilter, 15 | DisableFilter, 16 | EnableFilter, 17 | NoGenerationFilter, 18 | ) 19 | from oper8.watch_manager.python_watch_manager.filters.manager import ( 20 | AndFilter, 21 | FilterManager, 22 | OrFilter, 23 | ) 24 | 25 | ## Helpers ##################################################################### 26 | 27 | 28 | def test_manager_happy_path(): 29 | resource = make_managed_object(generation=None) 30 | filter = OrFilter(CreationDeletionFilter, NoGenerationFilter) 31 | fm = FilterManager(filter, resource) 32 | 33 | assert fm.update_and_test(resource, KubeEventType.ADDED) 34 | updated_resource = make_managed_object(spec={"modified": "value"}) 35 | assert fm.update_and_test(updated_resource, KubeEventType.MODIFIED) 36 | assert not fm.update_and_test(updated_resource, KubeEventType.MODIFIED) 37 | 38 | 39 | @pytest.mark.parametrize( 40 | ["filters", "result"], 41 | [ 42 | [EnableFilter, True], 43 | [AndFilter(EnableFilter, DisableFilter), False], 44 | [OrFilter(EnableFilter, DisableFilter), True], 45 | [AndFilter(OrFilter(EnableFilter, DisableFilter), EnableFilter), True], 46 | [AndFilter(OrFilter(DisableFilter, DisableFilter), EnableFilter), False], 47 | ], 48 | ) 49 | def test_manager_conditionals(filters, result): 50 | resource = make_managed_object() 51 | fm = FilterManager(filters, resource) 52 | assert fm.test(resource, KubeEventType.ADDED) == result 53 | 54 | 55 | def test_manager_to_info(): 56 | filters = OrFilter(CreationDeletionFilter, NoGenerationFilter) 57 | 58 | filter_info = FilterManager.to_info(filters) 59 | round_tripped_filters = FilterManager.from_info(filter_info) 60 | 61 | assert round_tripped_filters == filters 62 | -------------------------------------------------------------------------------- /tests/watch_manager/python_watch_manager/leader_election/test_init.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tests for the Leadership common functions 3 | """ 4 | # Third Party 5 | import pytest 6 | 7 | # Local 8 | from 
oper8.test_helpers.helpers import library_config 9 | from oper8.watch_manager.python_watch_manager.leader_election import ( 10 | AnnotationLeadershipManager, 11 | DryRunLeadershipManager, 12 | LeaderForLifeManager, 13 | LeaderWithLeaseManager, 14 | get_leader_election_class, 15 | ) 16 | 17 | ## Helpers ##################################################################### 18 | 19 | 20 | @pytest.mark.parametrize( 21 | ["config", "expected_class"], 22 | [ 23 | ["leader-for-life", LeaderForLifeManager], 24 | ["leader-with-lease", LeaderWithLeaseManager], 25 | ["annotation", AnnotationLeadershipManager], 26 | ["dryrun", DryRunLeadershipManager], 27 | ], 28 | ) 29 | def test_get_leader_election_class(config, expected_class): 30 | with library_config(python_watch_manager={"lock": {"type": config}}): 31 | assert get_leader_election_class() == expected_class 32 | -------------------------------------------------------------------------------- /tests/watch_manager/python_watch_manager/threads/test_heartbeat.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tests for the HeartbeatThread 3 | """ 4 | # Standard 5 | from datetime import datetime, timedelta 6 | from unittest import mock 7 | 8 | # Local 9 | from oper8.test_helpers.pwm_helpers import ( 10 | MockedHeartbeatThread, 11 | heartbeat_file, 12 | read_heartbeat_file, 13 | ) 14 | 15 | ## Helpers ##################################################################### 16 | 17 | 18 | class FailOnceOpen: 19 | def __init__(self, fail_on: int = 1): 20 | self.call_num = 0 21 | self.fail_on = fail_on 22 | self._real_open = open 23 | 24 | def __call__(self, *args, **kwargs): 25 | self.call_num += 1 26 | if self.call_num == self.fail_on: 27 | print(f"Raising on call {self.call_num}") 28 | raise OSError("Yikes") 29 | print(f"Returning from call {self.call_num}") 30 | return self._real_open(*args, **kwargs) 31 | 32 | 33 | ## Tests ####################################################################### 34 | 35 | 36 | def test_heartbeat_happy_path(heartbeat_file): 37 | """Make sure the heartbeat initializes correctly""" 38 | hb = MockedHeartbeatThread(heartbeat_file, "1s") 39 | 40 | # Heartbeat not run until started 41 | with open(heartbeat_file) as handle: 42 | assert not handle.read() 43 | 44 | # Start and stop the thread to trigger the first heartbeat only 45 | hb.start_thread() 46 | hb.wait_for_beat() 47 | hb.stop_thread() 48 | 49 | # Make sure the heartbeat is "current" 50 | assert read_heartbeat_file(heartbeat_file) > (datetime.now() - timedelta(seconds=5)) 51 | 52 | 53 | def test_heartbeat_ongoing(heartbeat_file): 54 | """Make sure that the heartbeat continues to beat in an ongoing way""" 55 | hb = MockedHeartbeatThread(heartbeat_file, "1s") 56 | 57 | # Start the thread and read the first one 58 | hb.start_thread() 59 | hb.wait_for_beat() 60 | first_hb = read_heartbeat_file(heartbeat_file) 61 | 62 | # Wait a bit and read again 63 | hb.wait_for_beat() 64 | hb.stop_thread() 65 | later_hb = read_heartbeat_file(heartbeat_file) 66 | assert later_hb > first_hb 67 | 68 | 69 | def test_heartbeat_with_exception(heartbeat_file): 70 | """Make sure that a sporadic failure does not terminate the heartbeat""" 71 | # Mock so that the third call to open will raise. 
This correlates with the 72 | # second heartbeat since we read the file using open after each heartbeat 73 | with mock.patch("builtins.open", new=FailOnceOpen(3)): 74 | hb = MockedHeartbeatThread(heartbeat_file, "1s") 75 | hb.start_thread() 76 | 77 | # The first beat succeeds 78 | hb.wait_for_beat() 79 | first_hb = read_heartbeat_file(heartbeat_file) 80 | 81 | # The second beat raises, but doesn't cause any problems 82 | hb.wait_for_beat() 83 | second_hb = read_heartbeat_file(heartbeat_file) 84 | 85 | # The third beat succeeds 86 | hb.wait_for_beat() 87 | third_hb = read_heartbeat_file(heartbeat_file) 88 | hb.stop_thread() 89 | 90 | assert first_hb == second_hb 91 | assert third_hb > first_hb 92 | -------------------------------------------------------------------------------- /tests/watch_manager/python_watch_manager/threads/test_timer_thread.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tests for the TimerThread 3 | """ 4 | # Standard 5 | from datetime import datetime, timedelta 6 | import time 7 | 8 | # Third Party 9 | import pytest 10 | 11 | # Local 12 | from oper8.test_helpers.pwm_helpers import MockedTimerThread 13 | 14 | ## Helpers ##################################################################### 15 | 16 | 17 | class Counter: 18 | def __init__(self, initial_value=0): 19 | self.value = initial_value 20 | 21 | def increment(self, value=1): 22 | self.value += value 23 | 24 | 25 | @pytest.mark.timeout(5) 26 | def test_timer_thread_happy_path(): 27 | timer = MockedTimerThread() 28 | timer.start_thread() 29 | 30 | value_tracker = Counter() 31 | timer.put_event(datetime.now(), value_tracker.increment) 32 | timer.put_event(datetime.now() + timedelta(seconds=0.1), value_tracker.increment) 33 | timer.put_event(datetime.now() + timedelta(seconds=0.2), value_tracker.increment, 2) 34 | timer.put_event( 35 | datetime.now() + timedelta(seconds=0.3), value_tracker.increment, value=2 36 | ) 37 | time.sleep(2.5) 38 | timer.stop_thread() 39 | assert value_tracker.value == 6 40 | 41 | 42 | @pytest.mark.timeout(5) 43 | def test_timer_thread_canceled(): 44 | timer = MockedTimerThread() 45 | 46 | value_tracker = Counter() 47 | timer.put_event(datetime.now(), value_tracker.increment) 48 | canceled_event = timer.put_event( 49 | datetime.now() + timedelta(seconds=0.5), value_tracker.increment 50 | ) 51 | canceled_event.cancel() 52 | 53 | timer.start_thread() 54 | time.sleep(2) 55 | timer.stop_thread() 56 | assert value_tracker.value == 1 57 | -------------------------------------------------------------------------------- /tests/watch_manager/python_watch_manager/utils/test_pwm_util_common.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tests for common PWM utils 3 | """ 4 | # Standard 5 | from datetime import timedelta 6 | from unittest import mock 7 | from uuid import uuid4 8 | import logging 9 | 10 | # Third Party 11 | import pytest 12 | 13 | # Local 14 | from oper8.test_helpers.helpers import library_config 15 | from oper8.watch_manager.python_watch_manager.utils import common 16 | 17 | 18 | @pytest.mark.parametrize( 19 | "test_cfg", 20 | [ 21 | ("12hr13m14s", timedelta(hours=12, minutes=13, seconds=14)), 22 | ("0hr0m62s", timedelta(minutes=1, seconds=2)), 23 | ("0hr0m62.5s", timedelta(minutes=1, seconds=2.5)), 24 | ("10s", timedelta(seconds=10)), 25 | ("foobar", None), 26 | ("0hr0m1.2.3s", None), 27 | ], 28 | ) 29 | def test_parse_time_delta(test_cfg): 30 | """Make sure that the parser behaves as 
expected for valid and invalid 31 | timedeltas 32 | """ 33 | time_str, result = test_cfg 34 | assert common.parse_time_delta(time_str) == result 35 | 36 | 37 | def test_get_operator_namespace_from_file(): 38 | ns_name = "some-namespace" 39 | other_ns_name = "other-namespace" 40 | path_mock = mock.MagicMock() 41 | path_mock.is_file = mock.MagicMock(return_value=True) 42 | path_mock.read_text = mock.MagicMock(return_value=ns_name) 43 | with mock.patch("pathlib.Path", return_value=path_mock), library_config( 44 | python_watch_manager={"lock": {"namespace": other_ns_name}} 45 | ): 46 | assert common.get_operator_namespace() == ns_name 47 | assert path_mock.is_file.called 48 | assert path_mock.read_text.called 49 | 50 | 51 | def test_get_operator_namespace_from_config(): 52 | ns_name = "some-namespace" 53 | other_ns_name = "other-namespace" 54 | path_mock = mock.MagicMock() 55 | path_mock.is_file = mock.MagicMock(return_value=False) 56 | path_mock.read_text = mock.MagicMock(return_value=ns_name) 57 | with mock.patch("pathlib.Path", return_value=path_mock), library_config( 58 | python_watch_manager={"lock": {"namespace": other_ns_name}} 59 | ): 60 | assert common.get_operator_namespace() == other_ns_name 61 | assert path_mock.is_file.called 62 | assert not path_mock.read_text.called 63 | 64 | 65 | def test_get_logging_handler_adds_stream_handlers(): 66 | """Make sure that get_logging_handlers adds a stream handler by default if 67 | no other handlers configured 68 | """ 69 | logger = logging.Logger(str(uuid4())) 70 | with mock.patch("logging.getLogger", return_value=logger): 71 | returned_handlers = common.get_logging_handlers() 72 | assert len(returned_handlers) == 1 73 | assert isinstance(returned_handlers[0], logging.StreamHandler) 74 | 75 | 76 | def test_get_logging_handler_does_not_overwrite_other_handlers(): 77 | """Make sure that get_logging_handlers does not change preconfigured 78 | handlers 79 | """ 80 | logger = logging.Logger(str(uuid4())) 81 | logger.addHandler(logging.NullHandler()) 82 | with mock.patch("logging.getLogger", return_value=logger): 83 | returned_handlers = common.get_logging_handlers() 84 | assert len(returned_handlers) == 1 85 | assert isinstance(returned_handlers[0], logging.NullHandler) 86 | -------------------------------------------------------------------------------- /tests/watch_manager/test_watch_manager_base.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tests for the WatchManagerBase base class 3 | """ 4 | 5 | # Standard 6 | import threading 7 | import time 8 | 9 | # Third Party 10 | import pytest 11 | 12 | # Local 13 | from oper8.test_helpers.helpers import DummyController 14 | from oper8.watch_manager.base import WatchManagerBase 15 | 16 | ## Helpers ##################################################################### 17 | 18 | 19 | class DummyWatchManager(WatchManagerBase): 20 | def __init__( 21 | self, 22 | controller_type, 23 | watch_success=True, 24 | stop_wait=0.0, 25 | ): 26 | super().__init__(controller_type) 27 | self.watching = False 28 | self.watch_success = watch_success 29 | self.stop_wait = stop_wait 30 | 31 | def watch(self): 32 | if self.watch_success: 33 | self.watching = True 34 | return True 35 | return False 36 | 37 | def wait(self): 38 | while self.watching: 39 | time.sleep(0.05) 40 | 41 | def stop(self): 42 | if self.stop_wait: 43 | threading.Thread(target=self._delayed_stop).start() 44 | else: 45 | self.watching = False 46 | 47 | def _delayed_stop(self): 48 | 
time.sleep(self.stop_wait) 49 | self.watching = False 50 | 51 | 52 | class DummyController2(DummyController): 53 | group = "asdf.qwer" 54 | version = "v1" 55 | kind = "Widget" 56 | 57 | 58 | @pytest.fixture(autouse=True) 59 | def reset_globals(): 60 | """This helper is only used in tests to "reset" the state of the global 61 | watches dict 62 | """ 63 | WatchManagerBase._ALL_WATCHES = {} 64 | 65 | 66 | ## Tests ####################################################################### 67 | 68 | 69 | def test_constructor_properties(): 70 | """Test that the base class properties are set on the watch manager""" 71 | wm = DummyWatchManager(DummyController) 72 | assert wm.controller_type == DummyController 73 | assert wm.group == DummyController.group 74 | assert wm.version == DummyController.version 75 | assert wm.kind == DummyController.kind 76 | 77 | 78 | def test_constructor_registrations(): 79 | """Test that all constructed watch managers get registered""" 80 | wm1 = DummyWatchManager(DummyController) 81 | wm2 = DummyWatchManager(DummyController2) 82 | assert len(WatchManagerBase._ALL_WATCHES) == 2 83 | assert str(wm1) in WatchManagerBase._ALL_WATCHES 84 | assert str(wm2) in WatchManagerBase._ALL_WATCHES 85 | 86 | 87 | def test_constructor_no_duplicate_watches(): 88 | """Test that constructing a second watch manager for the same controller raises""" 89 | DummyWatchManager(DummyController) 90 | with pytest.raises(AssertionError): 91 | DummyWatchManager(DummyController) 92 | 93 | 94 | def test_start_stop_all_blocking(): 95 | """Test that calling start_all and stop_all with block set to true do indeed 96 | block and correctly start/stop all managers 97 | """ 98 | wm1 = DummyWatchManager(DummyController) 99 | wm2 = DummyWatchManager(DummyController2) 100 | 101 | # Run start_all in a thread so that we can stop it 102 | thrd = threading.Thread(target=WatchManagerBase.start_all) 103 | thrd.start() 104 | time.sleep(0.1) 105 | 106 | # Make sure both watch managers are watching 107 | assert wm1.watching 108 | assert wm2.watching 109 | 110 | # Make sure the thread is blocked 111 | assert thrd.is_alive() 112 | 113 | # Stop all watch managers, and make sure all watch managers are not watching 114 | WatchManagerBase.stop_all() 115 | assert not wm1.watching 116 | assert not wm2.watching 117 | 118 | 119 | def test_start_all_blocking_failure(): 120 | """Test that calling start_all when one of the managers fails to start 121 | cleanly shuts down any started managers 122 | """ 123 | # NOTE: failure is on wm1 because it comes second alphabetically 124 | wm1 = DummyWatchManager(DummyController, watch_success=False) 125 | wm2 = DummyWatchManager(DummyController2) 126 | 127 | # Start them and make sure it returns failure 128 | assert not WatchManagerBase.start_all() 129 | assert not wm1.watching 130 | assert not wm2.watching 131 | -------------------------------------------------------------------------------- /tests/x/datastores/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IBM/oper8/63d30d3d38147283ba9621c625aa83f4461ead22/tests/x/datastores/__init__.py -------------------------------------------------------------------------------- /tests/x/datastores/cos/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IBM/oper8/63d30d3d38147283ba9621c625aa83f4461ead22/tests/x/datastores/cos/__init__.py -------------------------------------------------------------------------------- 
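# Editor's note: a self-contained sketch of the lifecycle contract that the
# WatchManagerBase tests above exercise: start_all() blocks until the managers
# stop, so callers drive it from a thread and unblock it via stop_all(). The
# Mini* names below are hypothetical stand-ins, not part of oper8's API.
import threading
import time

class MiniWatchRegistry:
    """Hypothetical registry mirroring the blocking start_all/stop_all pattern"""

    def __init__(self):
        self._stop = threading.Event()

    def start_all(self):
        # Block until asked to stop, as WatchManagerBase.start_all does
        self._stop.wait()

    def stop_all(self):
        self._stop.set()

registry = MiniWatchRegistry()
runner = threading.Thread(target=registry.start_all)
runner.start()
time.sleep(0.1)  # watches would be serviced while the runner blocks
registry.stop_all()
runner.join()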
/tests/x/datastores/postgres/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IBM/oper8/63d30d3d38147283ba9621c625aa83f4461ead22/tests/x/datastores/postgres/__init__.py -------------------------------------------------------------------------------- /tests/x/datastores/postgres/test_pg_factory.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tests of the postgres factory 3 | """ 4 | 5 | 6 | # Local 7 | from oper8.test_helpers.helpers import setup_session_ctx 8 | from oper8.x.datastores.postgres.factory import PostgresFactory 9 | from tests.x.datastores.postgres.util import ( 10 | POSTGRES_PROVIDED_CONNECTION_DEPLOY_CONFIG_OVERRIDES, 11 | set_postgres_secrets, 12 | ) 13 | 14 | ## Helpers ##################################################################### 15 | 16 | 17 | def get_config_overrides(config): 18 | return {"postgres": config} 19 | 20 | 21 | ## Tests ####################################################################### 22 | 23 | 24 | def test_get_component_with_provided_connection(): 25 | """Test that a provided connection passed in through the CR returns None on a get_component call""" 26 | override_deploy_configs = POSTGRES_PROVIDED_CONNECTION_DEPLOY_CONFIG_OVERRIDES 27 | 28 | with setup_session_ctx(deploy_config=override_deploy_configs) as session: 29 | set_postgres_secrets(session) 30 | component = PostgresFactory.get_component(session) 31 | # Since the component is provided, we should be getting None back 32 | assert component is None 33 | -------------------------------------------------------------------------------- /tests/x/datastores/postgres/util.py: -------------------------------------------------------------------------------- 1 | """ 2 | Shared utils for testing postgres 3 | """ 4 | 5 | # First Party 6 | import aconfig 7 | 8 | # Local 9 | from oper8.test_helpers.helpers import TEST_INSTANCE_NAME 10 | from oper8.test_helpers.oper8x_helpers import set_secret_data 11 | from oper8.x.datastores.postgres.factory import PostgresFactory 12 | from oper8.x.utils import common, constants 13 | 14 | ## Globals ##################################################################### 15 | 16 | SECRET_NAME_UNSCOPED_AUTH = "postgres-admin-auth" 17 | SECRET_NAME_UNSCOPED_TLS = "postgres-ca" 18 | # Used for testing/dry run purposes 19 | SECRET_NAME_UNSCOPED_URI = "postgres-uri" 20 | 21 | 22 | class AuthSecretKeys: 23 | USING_SECRET = "USING_SECRET" 24 | USERNAME = "username" 25 | PASSWORD = "password" 26 | PGPASS = "pgpass" 27 | PG_REPLICATION_USER = "PG_REPLICATION_USER" 28 | PG_REPLICATION_PASSWORD = "PG_REPLICATION_PASSWORD" 29 | ALL_KEYS = [ 30 | USERNAME, 31 | PASSWORD, 32 | PGPASS, 33 | ] 34 | 35 | 36 | class TlsSecretKeys: 37 | CERT = "ca.crt" 38 | KEY = "ca.key" 39 | CA_CERT = "ca.crt" 40 | ALL_KEYS = [ 41 | CERT, 42 | KEY, 43 | CA_CERT, 44 | ] 45 | 46 | 47 | class UriSecretKeys: 48 | HOSTNAME = "hostname" 49 | PORT = "port" 50 | 51 | 52 | TEST_POSTGRES_AUTH = { 53 | AuthSecretKeys.USING_SECRET: common.b64_secret("fake-secret"), 54 | AuthSecretKeys.USERNAME: common.b64_secret("fake-user"), 55 | AuthSecretKeys.PASSWORD: common.b64_secret("fake-password"), 56 | AuthSecretKeys.PGPASS: common.b64_secret("fake-pgpass"), 57 | AuthSecretKeys.PG_REPLICATION_USER: common.b64_secret("fake-replication-user"), 58 | AuthSecretKeys.PG_REPLICATION_PASSWORD: common.b64_secret( 59 | "fake-replication-password" 60 | ), 61 | } 62 | 
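# Editor's note: b64_secret is imported from oper8.x.utils.common above. Its exact
# implementation is not shown in this dump; a plausible equivalent (an assumption,
# not the source) is sketched here, since Kubernetes Secret `data` fields carry
# base64-encoded text:
import base64

def b64_secret_sketch(value: str) -> str:
    """Hypothetical stand-in for common.b64_secret"""
    return base64.b64encode(value.encode("utf-8")).decode("utf-8")

assert b64_secret_sketch("fake-user") == "ZmFrZS11c2Vy"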
63 | TEST_POSTGRES_TLS = { 64 | TlsSecretKeys.CERT: common.b64_secret("fake-tls-cert"), 65 | TlsSecretKeys.CA_CERT: common.b64_secret("fake-tls-ca-cert"), 66 | TlsSecretKeys.KEY: common.b64_secret("fake-tls-key"), 67 | } 68 | 69 | 70 | TEST_POSTGRES_URI = { 71 | UriSecretKeys.HOSTNAME: common.b64_secret("fake-uri-hostname"), 72 | UriSecretKeys.PORT: common.b64_secret("51423"), 73 | } 74 | 75 | POSTGRES_PROVIDED_CONNECTION_DEPLOY_CONFIG_OVERRIDES = { 76 | constants.SPEC_DATASTORES: { 77 | PostgresFactory.DATASTORE_TYPE: { 78 | constants.SPEC_DATASTORE_CONNECTION: { 79 | "uri_secret": f"{TEST_INSTANCE_NAME}-{SECRET_NAME_UNSCOPED_URI}", 80 | "uri_secret_hostname_field": UriSecretKeys.HOSTNAME, 81 | "uri_secret_port_field": UriSecretKeys.PORT, 82 | "auth_secret_name": f"{TEST_INSTANCE_NAME}-{SECRET_NAME_UNSCOPED_AUTH}", 83 | "auth_secret_username_field": AuthSecretKeys.USERNAME, 84 | "auth_secret_password_field": AuthSecretKeys.PASSWORD, 85 | "tls_secret_name": f"{TEST_INSTANCE_NAME}-{SECRET_NAME_UNSCOPED_TLS}", 86 | "tls_secret_cert_field": TlsSecretKeys.CERT, 87 | } 88 | } 89 | } 90 | } 91 | 92 | 93 | def set_postgres_secrets(session): 94 | set_postgres_auth_secret(session) 95 | set_postgres_tls_secret(session) 96 | set_postgres_uri_secret(session) 97 | 98 | 99 | def set_postgres_auth_secret(session, override=None): 100 | secret_data = override or TEST_POSTGRES_AUTH 101 | set_secret_data( 102 | session, 103 | SECRET_NAME_UNSCOPED_AUTH, 104 | secret_data, 105 | ) 106 | 107 | 108 | def set_postgres_tls_secret(session, override=None): 109 | secret_data = override or TEST_POSTGRES_TLS 110 | set_secret_data( 111 | session, 112 | SECRET_NAME_UNSCOPED_TLS, 113 | data=secret_data, 114 | ) 115 | 116 | 117 | def set_postgres_uri_secret(session, override=None): 118 | secret_data = override or TEST_POSTGRES_URI 119 | set_secret_data(session, SECRET_NAME_UNSCOPED_URI, secret_data) 120 | 121 | 122 | def get_spec_overrides(): 123 | return { 124 | constants.SPEC_DATASTORES: {"postgres": {"storageClassName": "test-storage"}} 125 | } 126 | -------------------------------------------------------------------------------- /tests/x/datastores/redis/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IBM/oper8/63d30d3d38147283ba9621c625aa83f4461ead22/tests/x/datastores/redis/__init__.py -------------------------------------------------------------------------------- /tests/x/datastores/redis/test_redis_factory.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tests of the redis factory 3 | """ 4 | # Third Party 5 | import pytest 6 | 7 | # Local 8 | from oper8.test_helpers.helpers import setup_session 9 | from oper8.x.datastores.redis.factory import RedisFactory 10 | 11 | ## Helpers ##################################################################### 12 | 13 | 14 | def get_config_overrides(config): 15 | return {RedisFactory.DATASTORE_TYPE: config} 16 | 17 | 18 | ## Error cases ################################################################# 19 | 20 | 21 | def test_construct_unknown(): 22 | """Test error when an unsupported type is given to RedisFactory""" 23 | session = setup_session(app_config=get_config_overrides({"type": "UnknownType"})) 24 | with pytest.raises(AssertionError): 25 | RedisFactory.get_component(session) 26 | 27 | 28 | def test_construct_notype(): 29 | """Test error when no type is specified to RedisFactory""" 30 | session = setup_session(app_config=get_config_overrides({"type": "ToRemove"})) 31 | if 
session.config.redis["type"] is not None: 32 | del session.config.redis["type"] 33 | with pytest.raises(AssertionError): 34 | RedisFactory.get_component(session) 35 | -------------------------------------------------------------------------------- /tests/x/datastores/redis/test_redis_interfaces.py: -------------------------------------------------------------------------------- 1 | """ 2 | Make sure that redis interfaces import cleanly 3 | """ 4 | 5 | # Third Party 6 | import pytest 7 | 8 | # Local 9 | from oper8.x.datastores.redis.interfaces import IRedisComponent 10 | 11 | 12 | def test_is_abstract(): 13 | with pytest.raises(TypeError): 14 | IRedisComponent() 15 | -------------------------------------------------------------------------------- /tests/x/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IBM/oper8/63d30d3d38147283ba9621c625aa83f4461ead22/tests/x/utils/__init__.py -------------------------------------------------------------------------------- /tests/x/utils/test_abc_static.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tests for the ABCStatic class base 3 | """ 4 | 5 | # Standard 6 | import abc 7 | 8 | # Third Party 9 | import pytest 10 | 11 | # Local 12 | from oper8.x.utils.abc_static import ABCStatic 13 | 14 | 15 | class Base(ABCStatic): 16 | @classmethod 17 | def foo(cls): 18 | """A classmethod that is implemented in the base""" 19 | return True 20 | 21 | def bar(self): 22 | """A standard instance method on the base class""" 23 | return 42 24 | 25 | @classmethod 26 | @abc.abstractmethod 27 | def baz(cls, arg): 28 | """An abstractmethod that is also a classmethod""" 29 | 30 | @abc.abstractmethod 31 | def bat(self, arg): 32 | """An abstractmethod that is NOT a classmethod""" 33 | 34 | 35 | ## Tests ####################################################################### 36 | 37 | 38 | def test_correct_implementation(): 39 | """Make sure that defining a class which follows the correct @classmethod 40 | definitions defines cleanly and can be used as expected 41 | """ 42 | 43 | class Foo(Base): 44 | @classmethod 45 | def baz(cls, arg): 46 | return arg + 1 47 | 48 | def bat(self, arg): 49 | return arg + 2 50 | 51 | assert Foo.foo() 52 | assert Foo().bar() == 42 53 | assert Foo.baz(1) == 2 54 | assert Foo().bat(1) == 3 55 | 56 | 57 | def test_staticmethod_implementation(): 58 | """Test that an abstract classmethod can be implemented with a @staticmethod 59 | in the child 60 | """ 61 | 62 | class Foo(Base): 63 | @staticmethod 64 | def baz(arg): 65 | return arg + 1 66 | 67 | def bat(self, arg): 68 | return arg + 2 69 | 70 | assert Foo.foo() 71 | assert Foo().bar() == 42 72 | assert Foo.baz(1) == 2 73 | assert Foo().bat(1) == 3 74 | 75 | 76 | def test_classmethod_implemented_with_star_args(): 77 | """Test that an abstract classmethod which is implemented with *args, 78 | **kwargs is ok 79 | """ 80 | 81 | class Foo(Base): 82 | 83 | BASE = 10 84 | 85 | @classmethod 86 | def baz(cls, *args, **kwargs): 87 | return cls.BASE + 1 88 | 89 | def bat(self, arg): 90 | return arg + 2 91 | 92 | assert Foo.foo() 93 | assert Foo.baz(1) == 11 94 | 95 | 96 | def test_incorrect_instancemethod_implementation(): 97 | """Test that implementing an abstractmethod/classmethod as an instance 98 | method results in a declaration-time exception. 
99 | """ 100 | with pytest.raises(NotImplementedError): 101 | 102 | class Foo(Base): 103 | def __init__(self, x): 104 | self.x = x 105 | 106 | def baz(self, arg): 107 | return self.x + 1 108 | 109 | def bat(self, arg): 110 | return arg + 2 111 | 112 | 113 | def test_abstractclassmethod_cannot_be_called(): 114 | """Test that an abstractmethod/classmethod which is not implemented by a 115 | child class raises when called 116 | """ 117 | 118 | class Foo(Base): 119 | pass 120 | 121 | with pytest.raises(NotImplementedError): 122 | Foo.baz(1) 123 | -------------------------------------------------------------------------------- /tests/x/utils/test_common.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test the functions in the common utility 3 | """ 4 | 5 | # Third Party 6 | import pytest 7 | 8 | # First Party 9 | import aconfig 10 | import alog 11 | 12 | # Local 13 | from oper8.exceptions import ConfigError 14 | from oper8.test_helpers.helpers import ( 15 | MockDeployManager, 16 | configure_logging, 17 | setup_cr, 18 | setup_session, 19 | ) 20 | from oper8.test_helpers.oper8x_helpers import set_object_test_state 21 | from oper8.x.utils import common 22 | 23 | configure_logging() 24 | log = alog.use_channel("TEST") 25 | 26 | 27 | ## Unit Tests ################################################################## 28 | 29 | 30 | def test_get_replicas(): 31 | 32 | # Size small 33 | _app_config = aconfig.Config( 34 | {"replicas": {"small": {"foo": 1}, "medium": {"foo": 2}}} 35 | ) 36 | _deploy_config = aconfig.Config({"size": "small"}) 37 | _cr = setup_cr( 38 | metadata={"namespace": "test", "name": "foo"}, 39 | kind="Deployment", 40 | apiVersion="apps/v1", 41 | ) 42 | _dm = MockDeployManager(resources=[_cr]) 43 | 44 | session = setup_session( 45 | app_config=_app_config, 46 | deploy_config=_deploy_config, 47 | deploy_manager=_dm, 48 | ) 49 | # get_replicas when previous state is not present should return valid 50 | # replica count 51 | assert common.get_replicas(session, "foo", "foo") == 1 52 | 53 | # Make sure an override is respected when resource is not present 54 | assert common.get_replicas(session, "foo", "foo", replicas_override=2) == 2 55 | 56 | # Set the state in the cluster and make sure that get_replicas returns 57 | # the current replication count 58 | success, changed = set_object_test_state( 59 | session, 60 | kind="Deployment", 61 | name="foo", 62 | value={ 63 | "apiVersion": "apps/v1", 64 | "metadata": { 65 | "labels": common.get_deploy_labels( 66 | session, 67 | ) 68 | }, 69 | "spec": {"replicas": 3}, 70 | }, 71 | ) 72 | assert success 73 | assert changed 74 | assert common.get_replicas(session, "foo", "foo") == 3 75 | 76 | # Make sure an override is not used when resource is present 77 | assert common.get_replicas(session, "foo", "foo", replicas_override=2) == 3 78 | 79 | # Make sure a force=True call returns the value even when the resource 80 | # exists in the cluster 81 | assert common.get_replicas(session, "foo", "foo", force=True) == 1 82 | 83 | # Make sure that changing the t-shirt size causes the replicas to be 84 | # returned 85 | session.spec.size = "medium" 86 | assert common.get_replicas(session, "foo", "foo") == 2 87 | 88 | # Size medium: make sure sizes are used as keys correctly 89 | session = setup_session( 90 | app_config=_app_config, 91 | deploy_config=aconfig.Config({"size": "medium"}), 92 | ) 93 | assert common.get_replicas(session, "foo", "foo") == 2 94 | 95 | # Size large: make sure misconfigured size raises a 
ConfigError 96 | session = setup_session( 97 | app_config=_app_config, 98 | deploy_config=aconfig.Config({"size": "large"}), 99 | ) 100 | with pytest.raises(ConfigError): 101 | common.get_replicas(session, "foo", "foo") 102 | 103 | 104 | def test_snake_case_to_camelcase(): 105 | test_input = [ 106 | None, 107 | "string_snake_case", 108 | { 109 | "dict_key_snake_case": "dict_value_snake_case", 110 | "dict_key_snake_case1": None, 111 | }, 112 | ] 113 | expected = [ 114 | None, 115 | "string_snake_case", 116 | {"dictKeySnakeCase": "dict_value_snake_case", "dictKeySnakeCase1": None}, 117 | ] 118 | camel_conversion = common.snake_case_to_camelcase(test_input) 119 | assert camel_conversion == expected 120 | round_trip = common.camelcase_to_snake_case(camel_conversion) 121 | assert round_trip == test_input 122 | -------------------------------------------------------------------------------- /tests/x/utils/test_tls.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test the TLS utility functionality 3 | """ 4 | 5 | # Third Party 6 | from cryptography import x509 7 | 8 | # First Party 9 | import alog 10 | 11 | # Local 12 | from oper8.x.utils import tls 13 | 14 | log = alog.use_channel("TEST") 15 | 16 | 17 | def test_get_subject_valid_type(): 18 | """Make sure the type returned by get_subject is the right type""" 19 | subject = tls.get_subject() 20 | assert isinstance(subject, x509.Name) 21 | -------------------------------------------------------------------------------- /tests/x/utils/tls_context/snapshots/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IBM/oper8/63d30d3d38147283ba9621c625aa83f4461ead22/tests/x/utils/tls_context/snapshots/__init__.py -------------------------------------------------------------------------------- /tests/x/utils/tls_context/test_internal.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test the TLS context utility functionality 3 | """ 4 | 5 | # Standard 6 | import re 7 | 8 | # First Party 9 | import aconfig 10 | import alog 11 | 12 | # Local 13 | from oper8.test_helpers.helpers import configure_logging, setup_session 14 | from oper8.x.utils.tls_context import factory, internal 15 | from tests.x.utils.tls_context.util import make_server_component_class 16 | 17 | configure_logging() 18 | log = alog.use_channel("TEST") 19 | 20 | NAME_CLEANER = re.compile("[^-a-zA-Z0-9]") 21 | 22 | ## Happy Path ## 23 | 24 | 25 | def test_register_same_cert_twice(): 26 | """Test that it is possible to request the same cert twice (the second call logs a WARNING)""" 27 | tls_cfg = aconfig.Config({"type": "internal"}) 28 | session = setup_session(app_config={"tls": tls_cfg}) 29 | context = internal.InternalTlsContext(session, config=tls_cfg) 30 | server_comp = make_server_component_class(request_cert=False)(session) 31 | context.request_server_key_cert_pair(server_comp, ["localhost"]) 32 | context.request_server_key_cert_pair(server_comp, ["localhost"]) 33 | 34 | 
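# Editor's note: the next test asserts a passthrough contract on
# get_server_key_cert_pair; a minimal, hypothetical sketch of that contract
# (illustrative only, not InternalTlsContext's code): when both existing PEMs
# are supplied, they are returned untouched rather than generating a new pair.

def passthrough_or_generate(existing_key_pem, existing_cert_pem, generate_pair):
    """Hypothetical helper showing the shape of the passthrough behavior"""
    if existing_key_pem and existing_cert_pem:
        return existing_key_pem, existing_cert_pem
    return generate_pair()

assert passthrough_or_generate("key", "cert", lambda: ("k2", "c2")) == ("key", "cert")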
35 | def test_passthrough_provided_certs(): 36 | """Test that existing cert parameters are passed through""" 37 | tls_cfg = aconfig.Config({"type": "internal"}) 38 | session = setup_session(deploy_config={"tls": tls_cfg}) 39 | existing_key_pem = "-----BEGIN FAKED PRIVATE KEY-----\nfake-key" 40 | existing_cert_pem = "-----BEGIN CERTIFICATE-----\nfake-cert" 41 | 42 | context = internal.InternalTlsContext(session, config=tls_cfg) 43 | server_comp = make_server_component_class(request_cert=False)(session) 44 | context.request_server_key_cert_pair(server_comp, ["localhost"]) 45 | 46 | (key_pem, cert_pem) = context.get_server_key_cert_pair( 47 | server_component=server_comp, 48 | encode=False, 49 | existing_key_pem=existing_key_pem, 50 | existing_cert_pem=existing_cert_pem, 51 | ) 52 | 53 | assert key_pem == existing_key_pem 54 | assert cert_pem == existing_cert_pem 55 | 56 | 57 | def test_multi_session(): 58 | """Test that reusing an existing session where the component has already 59 | been registered does not error out. This can happen when running in 60 | standalone mode (i.e. make run) 61 | """ 62 | session1 = setup_session(app_config={"tls": {"type": "internal"}}) 63 | 64 | # Create the instance 65 | factory.get_tls_context(session1) 66 | 67 | # Create a nested session to simulate standalone recursion 68 | session2 = setup_session(app_config={"tls": {"type": "internal"}}) 69 | factory.get_tls_context(session2) 70 | 71 | # Use the factory again with the original session 72 | factory.get_tls_context(session1) 73 | 74 | 75 | def test_label_overrides(): 76 | """Test that the labels can be overridden in the config""" 77 | tls_cfg = aconfig.Config( 78 | { 79 | "type": "internal", 80 | "labels": {"foo": "bar"}, 81 | } 82 | ) 83 | session = setup_session(app_config={"tls": tls_cfg}) 84 | context = internal.InternalTlsContext(session, config=tls_cfg) 85 | comp = context._component.to_dict(session)[0] 86 | assert comp["metadata"]["labels"] == tls_cfg.labels 87 | -------------------------------------------------------------------------------- /tests/x/utils/tls_context/util.py: -------------------------------------------------------------------------------- 1 | """ 2 | Helper methods for tls factory tests / TLS context tests. 
3 | """ 4 | 5 | # Standard 6 | import os 7 | import re 8 | 9 | # First Party 10 | import alog 11 | 12 | # Local 13 | from oper8 import Session, assert_precondition 14 | from oper8.test_helpers.oper8x_helpers import TEST_DATA_DIR, set_secret_data 15 | from oper8.x.oper8x_component import Oper8xComponent 16 | from oper8.x.utils import common 17 | from oper8.x.utils.tls_context.factory import ( 18 | _TlsContextSingletonFactory, 19 | get_tls_context, 20 | ) 21 | 22 | log = alog.use_channel("TEST") 23 | 24 | CERT_MGR_TEST_TLS_CERT = "-----BEGIN CERTIFICATE-----\nfake-cert" 25 | CERT_MGR_TEST_TLS_KEY = "-----BEGIN FAKED PRIVATE KEY-----\nfake-key" 26 | CERT_MGR_TEST_CA_CERT = "-----BEGIN CERTIFICATE-----\nfake-cacert" 27 | 28 | NAME_CLEANER = re.compile("[^-a-zA-Z0-9]") 29 | 30 | 31 | def reset_tls_factory(): 32 | _TlsContextSingletonFactory._instance = None 33 | 34 | 35 | def make_server_component_class(server_name="test-server", request_cert=True): 36 | class ServerComponent(Oper8xComponent): 37 | name = server_name 38 | 39 | KEY_FIELD = "copied_server_key" 40 | CRT_FIELD = "copied_server_crt" 41 | 42 | def __init__(self, session, *args, **kwargs): 43 | super().__init__(session, *args, **kwargs) 44 | 45 | # Get a server key/cert pair to use 46 | if request_cert: 47 | log.debug("[%s] Fetching TLS content", self) 48 | self.server_key, self.server_crt = get_tls_context( 49 | self.session 50 | ).get_server_key_cert_pair(self, [], encode=True) 51 | log.debug3("Server Key: %s", self.server_key) 52 | log.debug3("Server Crt: %s", self.server_crt) 53 | 54 | def build_chart(self, *_, **__): 55 | log.debug("[%s] build_chart", self) 56 | assert_precondition(None not in [self.server_key, self.server_crt]) 57 | 58 | # Add a secret to indicate that the precondition passed 59 | secret_name = self.get_secret_name() 60 | self.add_resource( 61 | name=secret_name, 62 | obj=dict( 63 | kind="Secret", 64 | apiVersion="v1", 65 | metadata=dict(name=secret_name), 66 | data={ 67 | self.KEY_FIELD: self.server_key, 68 | self.CRT_FIELD: self.server_crt, 69 | }, 70 | ), 71 | ) 72 | 73 | @classmethod 74 | def get_secret_name(cls): 75 | return f"{cls.name}-secret" 76 | 77 | return ServerComponent 78 | 79 | 80 | def set_cert_manager_secret_for_component( 81 | session: Session, 82 | component_name: str, 83 | data_override={}, 84 | name_override: str = None, 85 | ): 86 | component_name = NAME_CLEANER.sub("", component_name).lower() 87 | if not data_override: 88 | data_override = { 89 | "tls.key": common.b64_secret(CERT_MGR_TEST_TLS_KEY), 90 | "tls.crt": common.b64_secret(CERT_MGR_TEST_TLS_CERT), 91 | "ca.crt": common.b64_secret(CERT_MGR_TEST_CA_CERT), 92 | } 93 | set_secret_data( 94 | session, 95 | name=("tls-" + component_name) if name_override is None else name_override, 96 | data=data_override, 97 | secret_type="kubernetes.io/tls", 98 | ) 99 | set_cert_manager_secret_ca(session) 100 | 101 | 102 | def set_cert_manager_secret_ca(session: Session): 103 | with open(os.path.join(TEST_DATA_DIR, "test_ca.key"), "r") as f: 104 | key_pem = f.read() 105 | with open(os.path.join(TEST_DATA_DIR, "test_ca.crt"), "r") as f: 106 | crt_pem = f.read() 107 | ca_data = { 108 | "tls.key": common.b64_secret(key_pem), 109 | "tls.crt": common.b64_secret(crt_pem), 110 | "ca.crt": common.b64_secret(crt_pem), 111 | } 112 | set_secret_data(session, name="ca", data=ca_data, secret_type="kubernetes.io/tls") 113 | -------------------------------------------------------------------------------- /tox.ini: 
-------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = py, lint, fmt, docs 3 | 4 | [testenv] 5 | description = run tests with pytest with coverage 6 | extras = 7 | all 8 | dev-test 9 | passenv = 10 | LOG_LEVEL 11 | LOG_FILTERS 12 | LOG_JSON 13 | LOG_THREAD_ID 14 | LOG_CHANNEL_WIDTH 15 | commands = ./scripts/run_tests.sh {posargs} 16 | allowlist_externals = ./scripts/run_tests.sh 17 | 18 | ; Unclear: We probably want to test wheel packaging 19 | ; But! tox will fail when this is set and _any_ interpreter is missing 20 | ; Without this, sdist packaging is tested so that's a start. 21 | package=wheel 22 | 23 | [testenv:docs] 24 | description = build documentation 25 | extras = dev-docs 26 | commands = ./scripts/document.sh {posargs} 27 | allowlist_externals = ./scripts/document.sh 28 | 29 | [testenv:fmt] 30 | description = format with pre-commit 31 | extras = dev-fmt 32 | commands = ./scripts/fmt.sh 33 | allowlist_externals = ./scripts/fmt.sh 34 | 35 | [testenv:lint] 36 | description = lint with ruff 37 | extras = 38 | all 39 | dev-fmt 40 | dev-test 41 | commands = ruff check oper8 42 | 43 | [testenv:build] 44 | description = build wheel 45 | deps = 46 | build 47 | setuptools 48 | commands = python -m build 49 | skip_install = True 50 | 51 | [testenv:twinecheck] 52 | description = check wheel 53 | deps = twine 54 | commands = twine check dist/* 55 | --------------------------------------------------------------------------------
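Editor's note: the {posargs} placeholder in the [testenv] commands above forwards any arguments given after "--" straight to scripts/run_tests.sh, so a single module can be run with, for example, "tox -e py -- tests/test_session.py". This is standard tox behavior rather than anything oper8-specific.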