├── .github
│   ├── ISSUE_TEMPLATE.md
│   └── workflows
│       ├── publish.yml
│       ├── regular-checks.yml
│       └── tests.yml
├── .gitignore
├── .linting-config.yaml
├── .pre-commit-config.yaml
├── .readthedocs.yaml
├── AUTHORS
├── CHANGES.rst
├── LICENSE
├── MANIFEST.in
├── README.rst
├── UPGRADING.rst
├── artwork
│   └── cerberus.png
├── cerberus
│   ├── __init__.py
│   ├── benchmarks
│   │   ├── __init__.py
│   │   ├── documents
│   │   │   ├── overall_documents_1.json
│   │   │   └── overall_documents_2.json
│   │   ├── schemas
│   │   │   └── overalll_schema_2.py
│   │   ├── test_overall_performance_1.py
│   │   └── test_overall_performance_2.py
│   ├── errors.py
│   ├── platform.py
│   ├── schema.py
│   ├── tests
│   │   ├── __init__.py
│   │   ├── conftest.py
│   │   ├── test_assorted.py
│   │   ├── test_customization.py
│   │   ├── test_errors.py
│   │   ├── test_legacy.py
│   │   ├── test_normalization.py
│   │   ├── test_registries.py
│   │   ├── test_schema.py
│   │   ├── test_utils.py
│   │   └── test_validation.py
│   ├── utils.py
│   └── validator.py
├── docs
│   ├── Makefile
│   ├── _static
│   │   ├── cerberus.png
│   │   └── style.css
│   ├── api.rst
│   ├── authors.rst
│   ├── changelog.rst
│   ├── conf.py
│   ├── contact.rst
│   ├── contribute.rst
│   ├── customize.rst
│   ├── errors.rst
│   ├── external_resources.rst
│   ├── faq.rst
│   ├── includes
│   │   ├── .gitignore
│   │   └── generate.py
│   ├── index.rst
│   ├── install.rst
│   ├── license.rst
│   ├── normalization-rules.rst
│   ├── requirements.txt
│   ├── schemas.rst
│   ├── upgrading.rst
│   ├── usage.rst
│   └── validation-rules.rst
├── pyproject.toml
└── tox.ini
/.github/ISSUE_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | 
10 | 
--------------------------------------------------------------------------------
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
1 | ---
2 | 
3 | name: Publish on PyPI
4 | 
5 | on:
6 |   push:
7 |     tags:
8 |       - "*"
9 | 
10 | jobs:
11 |   tests:
12 |     uses: ./.github/workflows/tests.yml
13 | 
14 |   publish:
15 |     needs: tests
16 |     runs-on: ubuntu-latest
17 |     permissions:
18 |       id-token: write
19 |     steps:
20 |       - uses: actions/checkout@v3
21 |       - run: pip install build
22 |       - run: python -m build
23 |       - uses: pypa/gh-action-pypi-publish@release/v1
24 | 
25 | ...
26 | 
--------------------------------------------------------------------------------
/.github/workflows/regular-checks.yml:
--------------------------------------------------------------------------------
1 | ---
2 | 
3 | name: Quarterly checks
4 | 
5 | on:
6 |   schedule:
7 |     - cron: "44 19 16 */3 *"
8 | 
9 | jobs:
10 | 
11 |   CodeQL:
12 |     runs-on: ubuntu-latest
13 |     steps:
14 |       - name: Checkout repository
15 |         uses: actions/checkout@v3
16 |       - name: Initialize CodeQL
17 |         uses: github/codeql-action/init@v2
18 |         with:
19 |           languages: 'python'
20 |       - name: Perform CodeQL Analysis
21 |         uses: github/codeql-action/analyze@v2
22 | 
23 |   other:
24 |     runs-on: ubuntu-latest
25 |     steps:
26 |       - uses: actions/checkout@v3
27 |       - uses: actions/setup-python@v4
28 |         with:
29 |           python-version: 3.x
30 |       - run: python -m pip install tox
31 |       - run: tox -e doclinks
32 | 
33 | ...
34 | 
--------------------------------------------------------------------------------
/.github/workflows/tests.yml:
--------------------------------------------------------------------------------
1 | ---
2 | 
3 | name: Run tests
4 | 
5 | on:
6 |   pull_request:
7 |   push:
8 |     branches:
9 |       - 1.3.x
10 |   workflow_call:
11 |   workflow_dispatch:
12 |     inputs:
13 |       ref:
14 |         description: A git reference to check out.
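        # Consumed by the checkout steps below via `inputs.ref || github.ref`;
        # manually dispatched runs default to the maintained 1.3.x branch.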
15 | default: 1.3.x 16 | required: true 17 | type: string 18 | 19 | jobs: 20 | tests: 21 | runs-on: ubuntu-latest 22 | strategy: 23 | matrix: 24 | python-version: 25 | - 3.7 # 2023-06-07 26 | - 3.8 # 2024-10 27 | - 3.9 # 2025-10 28 | - "3.10" # 2026-10 29 | - 3.11 # 2027-10 30 | - 3.12 # 2028-10 31 | - 3.13 # 2029-10 32 | - pypy3.7 33 | - pypy3.8 34 | - pypy3.9 35 | - pypy3.10 36 | steps: 37 | - uses: actions/checkout@v3 38 | with: 39 | ref: ${{ inputs.ref || github.ref }} 40 | - uses: actions/setup-python@v4 41 | with: 42 | python-version: ${{ matrix.python-version }} 43 | - run: python -m pip install tox 44 | - run: tox -e py 45 | 46 | package: 47 | runs-on: ubuntu-latest 48 | steps: 49 | - uses: actions/checkout@v3 50 | with: 51 | ref: ${{ inputs.ref || github.ref }} 52 | - run: pip install --upgrade build pip 53 | - run: python -m build --wheel 54 | - run: pip install dist/*.whl 55 | 56 | other: 57 | runs-on: ubuntu-latest 58 | strategy: 59 | matrix: 60 | include: 61 | - environment: doctest 62 | - environment: linting 63 | steps: 64 | - uses: actions/checkout@v3 65 | with: 66 | ref: ${{ inputs.ref || github.ref }} 67 | - uses: actions/setup-python@v4 68 | with: 69 | python-version: 3.x 70 | - run: python -m pip install tox 71 | - run: tox -e ${{ matrix.environment }} 72 | 73 | ... 74 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.py[co] 2 | 3 | # Packages 4 | *.egg 5 | *.egg-info 6 | dist 7 | build 8 | eggs 9 | parts 10 | bin 11 | var 12 | sdist 13 | develop-eggs 14 | .installed.cfg 15 | .eggs/ 16 | 17 | # Installer logs 18 | pip-log.txt 19 | 20 | # Testing 21 | .benchmarks/ 22 | .cache 23 | .coverage 24 | .mypy_cache/ 25 | .pytest_cache/ 26 | .tox 27 | 28 | # Translations 29 | *.mo 30 | 31 | # Mr Developer 32 | .mr.developer.cfg 33 | 34 | # Sphinx 35 | _build 36 | 37 | # Jetbrains 38 | .idea 39 | .vscode/ 40 | .devcontainer/ 41 | -------------------------------------------------------------------------------- /.linting-config.yaml: -------------------------------------------------------------------------------- 1 | # mind that a similar file exists for pre-commit usage 2 | repos: 3 | - repo: https://github.com/psf/black 4 | rev: stable 5 | hooks: 6 | - id: black 7 | args: [--check] 8 | language_version: python3.12 9 | types: 10 | - python 11 | exclude: ^docs/conf.py$ 12 | - repo: https://github.com/pre-commit/pre-commit-hooks 13 | rev: v1.4.0 14 | hooks: 15 | - id: check-yaml 16 | types: 17 | - yaml 18 | - id: debug-statements 19 | types: 20 | - python 21 | - id: flake8 22 | types: 23 | - python 24 | exclude: ^docs/conf.py$ 25 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | # mind that a similar file exists for linting usage 2 | repos: 3 | - repo: https://github.com/psf/black 4 | rev: stable 5 | hooks: 6 | - id: black 7 | args: [--quiet] 8 | language_version: python3.12 9 | types: 10 | - python 11 | exclude: ^docs/conf.py$ 12 | - repo: https://github.com/pre-commit/pre-commit-hooks 13 | rev: v1.4.0 14 | hooks: 15 | - id: check-yaml 16 | types: 17 | - yaml 18 | - id: debug-statements 19 | types: 20 | - python 21 | - id: flake8 22 | types: 23 | - python 24 | exclude: ^docs/conf.py$ 25 | -------------------------------------------------------------------------------- /.readthedocs.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | 3 | version: 2 4 | 5 | build: 6 | os: ubuntu-22.04 7 | tools: 8 | python: "3" 9 | 10 | formats: all 11 | 12 | python: 13 | install: 14 | - path: . 15 | - requirements: docs/requirements.txt 16 | 17 | sphinx: 18 | configuration: docs/conf.py 19 | 20 | ... 21 | -------------------------------------------------------------------------------- /AUTHORS: -------------------------------------------------------------------------------- 1 | Cerberus is developed and maintained by the Cerberus community. It was created 2 | by Nicola Iarocci. 3 | 4 | Core maintainers 5 | ~~~~~~~~~~~~~~~~ 6 | 7 | - Nicola Iarocci (nicolaiarocci) 8 | - Frank Sachsenheim (funkyfuture) 9 | 10 | Contributors 11 | ~~~~~~~~~~~~ 12 | 13 | - Antoine Lubineau 14 | - Arsh Singh 15 | - Audric Schiltknecht 16 | - Brandon Aubie 17 | - Brett 18 | - Bruno Oliveira 19 | - Bryan W. Weber 20 | - C.D. Clark III 21 | - Christian Hogan 22 | - Connor Zapfel 23 | - Damián Nohales 24 | - Danielle Pizzolli 25 | - Davis Kirkendall 26 | - Denis Carriere 27 | - Dominik Kellner 28 | - Eelke Hermens 29 | - Evgeny Odegov 30 | - Florian Rathgeber 31 | - Gabriel Wainer 32 | - Harro van der Klauw 33 | - Jaroslav Semančík 34 | - Jonathan Huot 35 | - Kaleb Pomeroy 36 | - Kirill Pavlov 37 | - Kornelijus Survila 38 | - Lujeni 39 | - Luke Bechtel 40 | - Luo Peng 41 | - Martijn Vermaat 42 | - Martin Ortbauer 43 | - Matthew Ellison 44 | - Michael Klich 45 | - Nik Haldimann 46 | - Nikita Melentev 47 | - Nikita Vlaznev 48 | - Paul Weaver 49 | - Peter Demin 50 | - Riccardo 51 | - Roman Redkovich 52 | - Scott Crunkleton 53 | - Sebastian Heid 54 | - Sebastian Rajo 55 | - Sergey Leshchenko 56 | - Tobias Betz 57 | - Trong Hieu HA 58 | - Vipul Gupta 59 | - Waldir Pimenta 60 | - Yauhen Shulitski 61 | - calve 62 | - gilbsgilbs 63 | 64 | A full, up-to-date list of contributors is available from git with: 65 | 66 | git shortlog -sne 67 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | ISC License 2 | 3 | Copyright (c) 2012-2016 Nicola Iarocci. 4 | 5 | Permission to use, copy, modify, and/or distribute this software for any 6 | purpose with or without fee is hereby granted, provided that the above 7 | copyright notice and this permission notice appear in all copies. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH 10 | REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND 11 | FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, 12 | INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM 13 | LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR 14 | OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 15 | PERFORMANCE OF THIS SOFTWARE. 
16 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include AUTHORS 2 | include CHANGES 3 | include CONTRIBUTING.rst 4 | include LICENSE 5 | include README.rst 6 | include ROADMAP.md 7 | include UPGRADING.rst 8 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | Cerberus |latest-version| 2 | ========================= 3 | |python-support| |black| 4 | 5 | Cerberus is a lightweight and extensible data validation library for Python. 6 | 7 | .. code-block:: python 8 | 9 | >>> v = Validator({'name': {'type': 'string'}}) 10 | >>> v.validate({'name': 'john doe'}) 11 | True 12 | 13 | 14 | Features 15 | -------- 16 | 17 | Cerberus provides type checking and other base functionality out of the box and 18 | is designed to be non-blocking and easily and widely extensible, allowing for 19 | custom validation. It has no dependencies, but has the potential to become 20 | yours. 21 | 22 | 23 | Versioning & Interpreter support 24 | -------------------------------- 25 | 26 | Starting with Cerberus 1.2, it is maintained according to 27 | `semantic versioning`_. So, a major release sheds off the old and defines a 28 | space for the new, minor releases ship further new features and improvements 29 | (you know the drill, new bugs are inevitable too), and micro releases polish a 30 | definite amount of features to glory. 31 | 32 | We intend to test Cerberus against all CPython interpreters at least until half 33 | a year after their `end of life`_ and against the most recent PyPy interpreter 34 | as a requirement for a release. If you still need to use it with a potential 35 | security hole in your setup, it should most probably work with the latest 36 | minor version branch from the time when the interpreter was still tested. 37 | Subsequent minor versions have good chances as well. In any case, you are 38 | advised to run the contributed test suite on your target system. 39 | 40 | 41 | Documentation 42 | ------------- 43 | 44 | Complete documentation is available at http://docs.python-cerberus.org 45 | 46 | 47 | Installation 48 | ------------ 49 | 50 | Cerberus is on PyPI_, so all you need to do is: 51 | 52 | .. code-block:: console 53 | 54 | $ pip install cerberus 55 | 56 | 57 | Testing 58 | ------- 59 | 60 | Just run: 61 | 62 | .. code-block:: console 63 | 64 | $ python setup.py test 65 | 66 | Or you can use tox to run the tests under all supported Python versions. Make 67 | sure the required python versions are installed and run: 68 | 69 | .. code-block:: console 70 | 71 | $ pip install tox # first time only 72 | $ tox 73 | 74 | 75 | Contributing 76 | ------------ 77 | 78 | Please see the `Contribution Guidelines`_. 79 | 80 | 81 | Copyright 82 | --------- 83 | 84 | Cerberus is an open source project by `Nicola Iarocci`_. See the license_ file 85 | for more information. 86 | 87 | 88 | .. _Contribution Guidelines: https://github.com/pyeve/cerberus/blob/1.3.x/CONTRIBUTING.rst 89 | .. _end of life: https://devguide.python.org/#status-of-python-branches 90 | .. _license: https://github.com/pyeve/cerberus/blob/1.3.x/LICENSE 91 | .. _Nicola Iarocci: https://nicolaiarocci.com/ 92 | .. _PyPI: https://pypi.python.org/ 93 | .. _semantic versioning: https://semver.org/ 94 | 95 | .. 
|black| image:: https://img.shields.io/badge/code%20style-black-000000.svg
96 |    :alt: Black code style
97 |    :target: https://black.readthedocs.io/
98 | .. |latest-version| image:: https://img.shields.io/pypi/v/cerberus.svg
99 |    :alt: Latest version on PyPI
100 |    :target: https://pypi.org/project/cerberus
101 | .. |license| image:: https://img.shields.io/pypi/l/cerberus.svg
102 |    :alt: Software license
103 |    :target: https://github.com/pyeve/cerberus/blob/1.3.x/LICENSE
104 | .. |python-support| image:: https://img.shields.io/pypi/pyversions/cerberus.svg
105 |    :target: https://pypi.python.org/pypi/cerberus
106 |    :alt: Python versions
107 | 
--------------------------------------------------------------------------------
/UPGRADING.rst:
--------------------------------------------------------------------------------
1 | Upgrading to Cerberus 1.0
2 | =========================
3 | 
4 | Major Additions
5 | ---------------
6 | 
7 | Error Handling
8 | ..............
9 | 
10 | The inspection on and representation of errors has been thoroughly overhauled
11 | and allows more detailed and flexible handling. Make sure to have a look at
12 | :doc:`errors`.
13 | 
14 | Also, :attr:`~cerberus.Validator.errors` (as provided by the default
15 | :class:`~cerberus.errors.BasicErrorHandler`) values are lists containing
16 | error messages, and possibly a ``dict`` as last item containing nested errors.
17 | Previously, they were strings if single errors per field occurred; lists
18 | otherwise.
19 | 
20 | 
21 | Deprecations
22 | ------------
23 | 
24 | ``Validator`` class
25 | ...................
26 | 
27 | transparent_schema_rules
28 | ~~~~~~~~~~~~~~~~~~~~~~~~
29 | 
30 | In the past you could override the schema validation by setting
31 | ``transparent_schema_rules`` to ``True``. Now every rule whose implementing
32 | method's docstring contains a schema for the rule's arguments is validated
33 | whenever it appears in a validation schema.
34 | To omit the schema validation for a particular rule, just omit that definition,
35 | but consider this a bad practice.
36 | The :class:`~cerberus.Validator`-attribute and -initialization-argument
37 | ``transparent_schema_rules`` are removed without replacement.
38 | 
39 | validate_update
40 | ~~~~~~~~~~~~~~~
41 | 
42 | The method ``validate_update`` has been removed from
43 | :class:`~cerberus.Validator`. Instead use :meth:`~cerberus.Validator.validate`
44 | with the keyword-argument ``update`` set to ``True``.
45 | 
46 | 
47 | Rules
48 | .....
49 | 
50 | items (for mappings)
51 | ~~~~~~~~~~~~~~~~~~~~
52 | 
53 | The usage of the ``items``-rule is restricted to sequences.
54 | If you still have schemas that use that rule to validate
55 | :term:`mappings `, just rename these instances to ``schema``
56 | (:ref:`docs `).
57 | 
58 | keyschema & valueschema
59 | ~~~~~~~~~~~~~~~~~~~~~~~
60 | 
61 | To reflect the common terms in the Pythoniverse [#]_, the rule for validating
62 | all *values* of a :term:`mapping` was renamed from ``keyschema`` to
63 | ``valueschema``. Furthermore, a rule to validate all *keys* was introduced
64 | as ``propertyschema`` and has now been renamed to ``keyschema``. This means that
65 | code written for prior versions of Cerberus will not break, but will silently yield wrong results!
66 | 
67 | To update your code you may follow the same iteration that Cerberus went through:
68 | 
69 | 1. Rename ``keyschema`` to ``valueschema`` in your schemas. (``0.9``)
70 | 2. Rename ``propertyschema`` to ``keyschema`` in your schemas.
(``1.0``) 71 | 72 | Note that ``propertyschema`` will *not* be handled as an alias like 73 | ``keyschema`` was in the ``0.9``-branch. 74 | 75 | 76 | Custom validators 77 | ................. 78 | 79 | Data types 80 | ~~~~~~~~~~ 81 | 82 | Since the ``type``-rule allowed multiple arguments cerberus' type validation 83 | code was somewhat cumbersome as it had to deal with the circumstance that each 84 | type checking method would file an error though another one may not - and thus 85 | positively validate the constraint as a whole. 86 | The refactoring of the error handling allows cerberus' type validation to be 87 | much more lightweight and to formulate the corresponding methods in a simpler 88 | way. 89 | 90 | Previously such a method would test what a value *is not* and submit an error. 91 | Now a method tests what a value *is* to be expected and returns ``True`` in 92 | that case. 93 | 94 | This is the most critical part of updating your code, but still easy when your 95 | head is clear. Of course your code is well tested. It's essentially these 96 | three steps. Search, Replace and Regex may come at your service. 97 | 98 | 1. Remove the second method's argument (probably named ``field``). 99 | 2. Invert the logic of the conditional clauses where is tested what a value 100 | is not / has not. 101 | 3. Replace calls to ``self._error`` below such clauses with 102 | ``return True``. 103 | 104 | A method doesn't need to return ``False`` or any value when expected criteria 105 | are not met. 106 | 107 | Here's the change from the :ref:`documentation ` example. 108 | 109 | pre-1.0: 110 | 111 | .. code-block:: python 112 | 113 | def _validate_type_objectid(self, field, value): 114 | if not re.match('[a-f0-9]{24}', value): 115 | self._error(field, errors.BAD_TYPE) 116 | 117 | 1.0: 118 | 119 | .. code-block:: python 120 | 121 | def _validate_type_objectid(self, value): 122 | if re.match('[a-f0-9]{24}', value): 123 | return True 124 | 125 | 126 | 127 | .. [#] compare :term:`dictionary` 128 | -------------------------------------------------------------------------------- /artwork/cerberus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyeve/cerberus/c07c2f942873bd90d333347cb679850a85680aa6/artwork/cerberus.png -------------------------------------------------------------------------------- /cerberus/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Extensible validation for Python dictionaries. 3 | 4 | :copyright: 2012-2023 by Nicola Iarocci. 5 | :license: ISC, see LICENSE for more details. 
6 | 7 | Full documentation is available at https://python-cerberus.org/ 8 | 9 | """ 10 | 11 | from __future__ import absolute_import 12 | 13 | from cerberus.platform import importlib_metadata 14 | from cerberus.schema import rules_set_registry, schema_registry, SchemaError 15 | from cerberus.utils import TypeDefinition 16 | from cerberus.validator import DocumentError, Validator 17 | 18 | 19 | try: 20 | __version__ = importlib_metadata.version("Cerberus") 21 | except importlib_metadata.PackageNotFoundError: 22 | __version__ = "unknown" 23 | 24 | __all__ = [ 25 | DocumentError.__name__, 26 | SchemaError.__name__, 27 | TypeDefinition.__name__, 28 | Validator.__name__, 29 | "schema_registry", 30 | "rules_set_registry", 31 | "__version__", 32 | ] 33 | -------------------------------------------------------------------------------- /cerberus/benchmarks/__init__.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | 4 | DOCUMENTS_PATH = Path(__file__).parent / "documents" 5 | -------------------------------------------------------------------------------- /cerberus/benchmarks/schemas/overalll_schema_2.py: -------------------------------------------------------------------------------- 1 | import uuid 2 | 3 | 4 | P_TYPES = ['ONE', 'TWO'] 5 | T_TYPES = ['NO', 'V20'] 6 | 7 | 8 | def to_bool(value): 9 | return value.lower() in ('true', '1') 10 | 11 | 12 | def allowed_tax(value): 13 | return value if value.upper() in T_TYPES else 'NO' 14 | 15 | 16 | def allowed_types(value): 17 | return value if value.upper() in P_TYPES else 'ONE' 18 | 19 | 20 | def none_to_zero(value): 21 | return 0 if value in (None, "") else value 22 | 23 | 24 | def empty_str_to_null(value): 25 | return None if value == '' else value 26 | 27 | 28 | product_schema = { 29 | 'uuid': { 30 | 'type': 'string', 31 | 'required': True, 32 | 'nullable': False, 33 | 'default_setter': lambda x: uuid.uuid4().__str__(), 34 | }, 35 | 'name': { 36 | 'type': 'string', 37 | 'minlength': 1, 38 | 'maxlength': 100, 39 | 'required': True, 40 | 'nullable': False, 41 | 'default': 'noname product', 42 | }, 43 | 'group': { 44 | 'type': 'boolean', 45 | 'required': False, 46 | 'nullable': False, 47 | 'default': False, 48 | }, 49 | 'parentUuid': { 50 | 'type': 'string', 51 | 'required': False, 52 | 'nullable': True, 53 | 'default': None, 54 | }, 55 | 'hasVariants': { 56 | 'type': 'boolean', 57 | 'required': False, 58 | 'nullable': False, 59 | 'default': False, 60 | }, 61 | 'type': { 62 | 'type': 'string', 63 | 'required': True, 64 | 'allowed': P_TYPES, 65 | 'nullable': False, 66 | 'default': 'ONE', 67 | 'coerce': allowed_types, 68 | }, 69 | 'quantity': { 70 | 'type': 'float', 71 | 'required': True, 72 | 'nullable': False, 73 | 'default': 0, 74 | 'coerce': (none_to_zero, float), 75 | }, 76 | 'measureName': { 77 | 'type': 'string', 78 | 'required': True, 79 | 'nullable': False, 80 | 'default': '', 81 | 'coerce': str, 82 | }, 83 | 'tax': { 84 | 'type': 'string', 85 | 'required': True, 86 | 'allowed': T_TYPES, 87 | 'nullable': False, 88 | 'default': 'NO', 89 | 'coerce': allowed_tax, 90 | }, 91 | 'price': { 92 | 'type': 'float', 93 | 'required': True, 94 | 'min': 0, 95 | 'max': 9999999.99, 96 | 'nullable': False, 97 | 'default': 0, 98 | 'coerce': (none_to_zero, float), 99 | }, 100 | 'allowToSell': { 101 | 'type': 'boolean', 102 | 'required': True, 103 | 'nullable': False, 104 | 'default': True, 105 | 'coerce': (str, to_bool), 106 | }, 107 | 'costPrice': { 108 | 'type': 'float', 109 | 'min': 0, 110 
| 'max': 9999999.99, 111 | 'required': True, 112 | 'nullable': False, 113 | 'default': 0, 114 | 'coerce': (none_to_zero, float), 115 | }, 116 | 'description': { 117 | 'type': 'string', 118 | 'minlength': 0, 119 | 'required': False, 120 | 'nullable': True, 121 | 'default': '', 122 | 'coerce': str, 123 | }, 124 | 'articleNumber': { 125 | 'type': 'string', 126 | 'minlength': 0, 127 | 'maxlength': 20, 128 | 'required': False, 129 | 'nullable': True, 130 | 'coerce': (empty_str_to_null, lambda s: str(s)[0:19]), 131 | 'default': '', 132 | }, 133 | 'code': { 134 | 'type': 'string', 135 | 'minlength': 0, 136 | 'maxlength': 10, 137 | 'required': True, 138 | 'coerce': (empty_str_to_null, lambda s: str(s)[0:9]), 139 | 'default': '', 140 | }, 141 | 'barCodes': {'type': 'list', 'required': True, 'nullable': True, 'default': []}, 142 | 'alcoCodes': {'type': 'list', 'required': True, 'nullable': True, 'default': []}, 143 | 'alcoholByVolume': { 144 | 'type': ['float', 'string'], 145 | 'required': True, 146 | 'nullable': True, 147 | 'default': None, 148 | 'coerce': (empty_str_to_null, float), 149 | }, 150 | 'alcoholProductKindCode': { 151 | 'type': ['float', 'string'], 152 | 'required': True, 153 | 'nullable': True, 154 | 'default': None, 155 | 'coerce': (empty_str_to_null, int), 156 | }, 157 | 'tareVolume': { 158 | 'type': ['float', 'string'], 159 | 'required': True, 160 | 'nullable': True, 161 | 'default': None, 162 | 'coerce': (empty_str_to_null, float), 163 | }, 164 | } 165 | -------------------------------------------------------------------------------- /cerberus/benchmarks/test_overall_performance_1.py: -------------------------------------------------------------------------------- 1 | """ 2 | some notes regarding this test suite: 3 | - results are only comparable using the semantically equal schema against and 4 | identical set of documents in the same execution environment 5 | - the module can be executed to generate a new set of test documents 6 | - it is intended to detect *significant* changes in validation time 7 | - benchmarks should run with as few other processes running on the system as 8 | possible (e.g. 
an Alpine Linux on bare metal w/o a Desktop environment) 9 | """ 10 | 11 | import json 12 | from collections import Counter 13 | from pathlib import Path 14 | from random import choice, randrange 15 | from typing import Callable, List 16 | 17 | from pytest import mark 18 | 19 | from cerberus import rules_set_registry, schema_registry, TypeDefinition, Validator 20 | from cerberus.benchmarks import DOCUMENTS_PATH 21 | 22 | 23 | rules_set_registry.add("path_rules", {"coerce": Path, "type": "path"}) 24 | 25 | 26 | schema_registry.add( 27 | "field_3_schema", 28 | { 29 | # an outer rule requires all fields' values to be a list 30 | "field_31": {"contains": 0, "empty": False}, 31 | "field_32": { 32 | "default": [None, None, None], 33 | "items": [ 34 | {"type": "integer"}, 35 | {"type": "string"}, 36 | {"type": ["integer", "string"]}, 37 | ], 38 | "schema": {"nullable": True}, 39 | }, 40 | }, 41 | ) 42 | 43 | 44 | def schema_1_field_3_allow_unknown_check_with(field, value, error): 45 | if len(value) > 9: 46 | error(field, "Requires a smaller list.") 47 | 48 | 49 | schema_1 = { 50 | "field_1": { 51 | "type": "dict", 52 | "required": True, 53 | "allow_unknown": True, 54 | "keysrules": {"regex": r"field_1[12345]"}, 55 | "minlength": 3, 56 | "maxlength": 5, 57 | "schema": { 58 | "field_11": { 59 | "type": "integer", 60 | "allowed": list(range(100)), 61 | "dependencies": {"field_12": 0, "^field_1.field_13": 0}, 62 | }, 63 | "field_12": { 64 | "type": "integer", 65 | "default_setter": lambda _: 1, 66 | "forbidden": (1,), 67 | }, 68 | "field_13": {"type": "integer"}, 69 | "field_14": {"rename": "field_13"}, 70 | }, 71 | }, 72 | "field_2": { 73 | "type": "dict", 74 | "allow_unknown": False, 75 | "schema": { 76 | "field_21": { 77 | "type": "integer", 78 | "coerce": [str.strip, int], 79 | "min": 9, 80 | "max": 89, 81 | "anyof": [{"dependencies": "field_22"}, {"dependencies": "field_23"}], 82 | }, 83 | "field_22": {"excludes": "field_23", "nullable": True}, 84 | "field_23": {"nullable": True}, 85 | }, 86 | }, 87 | "field_3": { 88 | "allow_unknown": {"check_with": schema_1_field_3_allow_unknown_check_with}, 89 | "valuesrules": {"type": "list"}, 90 | "require_all": True, 91 | "schema": "field_3_schema", 92 | }, 93 | "field_4": "path_rules", 94 | } 95 | 96 | 97 | def init_validator(): 98 | class TestValidator(Validator): 99 | types_mapping = { 100 | **Validator.types_mapping, 101 | "path": TypeDefinition("path", (Path,), ()), 102 | } 103 | 104 | return TestValidator(schema_1, purge_unknown=True) 105 | 106 | 107 | def load_documents(): 108 | with (DOCUMENTS_PATH / "overall_documents_1.json").open() as f: 109 | documents = json.load(f) 110 | return documents 111 | 112 | 113 | def validate_documents(init_validator: Callable, documents: List[dict]): 114 | doc_count = failed_count = 0 115 | error_paths = Counter() 116 | validator = init_validator() 117 | 118 | def count_errors(errors): 119 | if errors is None: 120 | return 121 | for error in errors: 122 | if error.is_group_error: 123 | count_errors(error.child_errors) 124 | else: 125 | error_paths[error.schema_path] += 1 126 | 127 | for document in documents: 128 | if validator.validated(document) is None: 129 | failed_count += 1 130 | count_errors(validator._errors) 131 | doc_count += 1 132 | 133 | print( 134 | f"{failed_count} out of {doc_count} documents failed with " 135 | f"{len(error_paths)} different error leafs." 
136 | ) 137 | print("Top 3 errors, excluding container errors:") 138 | for path, count in error_paths.most_common(3): 139 | print(f"{count}: {path}") 140 | 141 | 142 | @mark.benchmark(group="overall-1") 143 | def test_overall_performance_1(benchmark): 144 | benchmark.pedantic(validate_documents, (init_validator, load_documents()), rounds=5) 145 | 146 | 147 | # 148 | 149 | 150 | def generate_sample_document_1() -> dict: 151 | result = {} 152 | for i in (1, 2, 3, 4, 5): 153 | if randrange(100): 154 | result[f"field_{i}"] = globals()[f"generate_document_1_field_{i}"]() 155 | return result 156 | 157 | 158 | def generate_document_1_field_1() -> dict: 159 | result = {"field_11": randrange(100), "field_13": 0} 160 | if randrange(100): 161 | result["field_12"] = 0 162 | if not randrange(100): 163 | result["field_14"] = None 164 | if randrange(100): 165 | result["field_15"] = None 166 | return result 167 | 168 | 169 | def generate_document_1_field_2() -> dict: 170 | x = "*" if not randrange(50) else " " 171 | result = {"field_21": x + str(randrange(100)) + x} 172 | 173 | if randrange(100): 174 | result["field_22"] = None 175 | if "field_22" in result and not randrange(100): 176 | result["field_23"] = None 177 | 178 | return result 179 | 180 | 181 | def generate_document_1_field_3() -> dict: 182 | result = {} 183 | if randrange(100): 184 | result["field_31"] = [randrange(2) for _ in range(randrange(20))] 185 | else: 186 | result["field_31"] = None 187 | if randrange(100): 188 | result["field_32"] = [ 189 | choice((0, 0, 0, 0, 0, 0, 0, 0, "", None)), 190 | choice(("", "", "", "", "", "", "", "", 0, None)), 191 | choice((0, 0, 0, 0, "", "", "", "", None)), 192 | ] 193 | if not randrange(10): 194 | result["3_unknown"] = [0] * (randrange(10) + 1) 195 | return result 196 | 197 | 198 | def generate_document_1_field_4(): 199 | return "/foo/bar" if randrange(100) else 0 200 | 201 | 202 | def generate_document_1_field_5(): 203 | return None 204 | 205 | 206 | def write_sample_documents(): 207 | with (DOCUMENTS_PATH / "overall_documents_1.json").open("wt") as f: 208 | json.dump([generate_sample_document_1() for _ in range(10_000)], f) 209 | 210 | 211 | if __name__ == "__main__": 212 | write_sample_documents() 213 | -------------------------------------------------------------------------------- /cerberus/benchmarks/test_overall_performance_2.py: -------------------------------------------------------------------------------- 1 | import json 2 | from collections import Counter 3 | from typing import Callable, List 4 | from typing import Counter as CounterType 5 | 6 | from pytest import mark 7 | 8 | from cerberus import Validator 9 | from cerberus.benchmarks.schemas.overalll_schema_2 import product_schema 10 | from cerberus.benchmarks import DOCUMENTS_PATH 11 | 12 | 13 | def init_validator(): 14 | return Validator(product_schema, purge_unknown=True) 15 | 16 | 17 | def load_documents(): 18 | with (DOCUMENTS_PATH / "overall_documents_2.json").open() as f: 19 | documents = json.load(f) 20 | return documents 21 | 22 | 23 | def validate_documents(init_validator: Callable, documents: List[dict]) -> None: 24 | doc_count = failed_count = 0 25 | error_paths: CounterType[tuple] = Counter() 26 | validator = init_validator() 27 | 28 | def count_errors(errors): 29 | if errors is None: 30 | return 31 | for error in errors: 32 | if error.is_group_error: 33 | count_errors(error.child_errors) 34 | else: 35 | error_paths[error.schema_path] += 1 36 | 37 | for document in documents: 38 | if validator.validated(document) is None: 39 
| failed_count += 1 40 | count_errors(validator._errors) 41 | doc_count += 1 42 | 43 | print( 44 | f"{failed_count} out of {doc_count} documents failed with " 45 | f"{len(error_paths)} different error leafs." 46 | ) 47 | print("Top 3 errors, excluding container errors:") 48 | for path, count in error_paths.most_common(3): 49 | print(f"{count}: {path}") 50 | 51 | 52 | @mark.benchmark(group="overall-2") 53 | def test_overall_performance_2(benchmark): 54 | benchmark.pedantic(validate_documents, (init_validator, load_documents()), rounds=5) 55 | -------------------------------------------------------------------------------- /cerberus/errors.py: -------------------------------------------------------------------------------- 1 | # -*-: coding utf-8 -*- 2 | """ This module contains the error-related constants and classes. """ 3 | 4 | from __future__ import absolute_import 5 | 6 | import sys 7 | from collections import defaultdict, namedtuple 8 | from copy import copy, deepcopy 9 | from functools import wraps 10 | from pprint import pformat 11 | 12 | from cerberus.platform import MutableMapping 13 | from cerberus.utils import compare_paths_lt, quote_string 14 | 15 | 16 | ErrorDefinition = namedtuple('ErrorDefinition', 'code, rule') 17 | """ 18 | This class is used to define possible errors. Each distinguishable error is 19 | defined by a *unique* error ``code`` as integer and the ``rule`` that can 20 | cause it as string. 21 | The instances' names do not contain a common prefix as they are supposed to be 22 | referenced within the module namespace, e.g. ``errors.CUSTOM``. 23 | """ 24 | 25 | 26 | # custom 27 | CUSTOM = ErrorDefinition(0x00, None) 28 | 29 | # existence 30 | DOCUMENT_MISSING = ErrorDefinition(0x01, None) # issues/141 31 | DOCUMENT_MISSING = "document is missing" 32 | REQUIRED_FIELD = ErrorDefinition(0x02, 'required') 33 | UNKNOWN_FIELD = ErrorDefinition(0x03, None) 34 | DEPENDENCIES_FIELD = ErrorDefinition(0x04, 'dependencies') 35 | DEPENDENCIES_FIELD_VALUE = ErrorDefinition(0x05, 'dependencies') 36 | EXCLUDES_FIELD = ErrorDefinition(0x06, 'excludes') 37 | 38 | # shape 39 | DOCUMENT_FORMAT = ErrorDefinition(0x21, None) # issues/141 40 | DOCUMENT_FORMAT = "'{0}' is not a document, must be a dict" 41 | EMPTY_NOT_ALLOWED = ErrorDefinition(0x22, 'empty') 42 | NOT_NULLABLE = ErrorDefinition(0x23, 'nullable') 43 | BAD_TYPE = ErrorDefinition(0x24, 'type') 44 | BAD_TYPE_FOR_SCHEMA = ErrorDefinition(0x25, 'schema') 45 | ITEMS_LENGTH = ErrorDefinition(0x26, 'items') 46 | MIN_LENGTH = ErrorDefinition(0x27, 'minlength') 47 | MAX_LENGTH = ErrorDefinition(0x28, 'maxlength') 48 | 49 | 50 | # color 51 | REGEX_MISMATCH = ErrorDefinition(0x41, 'regex') 52 | MIN_VALUE = ErrorDefinition(0x42, 'min') 53 | MAX_VALUE = ErrorDefinition(0x43, 'max') 54 | UNALLOWED_VALUE = ErrorDefinition(0x44, 'allowed') 55 | UNALLOWED_VALUES = ErrorDefinition(0x45, 'allowed') 56 | FORBIDDEN_VALUE = ErrorDefinition(0x46, 'forbidden') 57 | FORBIDDEN_VALUES = ErrorDefinition(0x47, 'forbidden') 58 | MISSING_MEMBERS = ErrorDefinition(0x48, 'contains') 59 | 60 | # other 61 | NORMALIZATION = ErrorDefinition(0x60, None) 62 | COERCION_FAILED = ErrorDefinition(0x61, 'coerce') 63 | RENAMING_FAILED = ErrorDefinition(0x62, 'rename_handler') 64 | READONLY_FIELD = ErrorDefinition(0x63, 'readonly') 65 | SETTING_DEFAULT_FAILED = ErrorDefinition(0x64, 'default_setter') 66 | 67 | # groups 68 | ERROR_GROUP = ErrorDefinition(0x80, None) 69 | MAPPING_SCHEMA = ErrorDefinition(0x81, 'schema') 70 | SEQUENCE_SCHEMA = ErrorDefinition(0x82, 
'schema') 71 | # TODO remove KEYSCHEMA AND VALUESCHEMA with next major release 72 | KEYSRULES = KEYSCHEMA = ErrorDefinition(0x83, 'keysrules') 73 | VALUESRULES = VALUESCHEMA = ErrorDefinition(0x84, 'valuesrules') 74 | BAD_ITEMS = ErrorDefinition(0x8F, 'items') 75 | 76 | LOGICAL = ErrorDefinition(0x90, None) 77 | NONEOF = ErrorDefinition(0x91, 'noneof') 78 | ONEOF = ErrorDefinition(0x92, 'oneof') 79 | ANYOF = ErrorDefinition(0x93, 'anyof') 80 | ALLOF = ErrorDefinition(0x94, 'allof') 81 | 82 | 83 | """ SchemaError messages """ 84 | 85 | SCHEMA_ERROR_DEFINITION_TYPE = "schema definition for field '{0}' must be a dict" 86 | SCHEMA_ERROR_MISSING = "validation schema missing" 87 | 88 | 89 | """ Error representations """ 90 | 91 | 92 | class ValidationError(object): 93 | """A simple class to store and query basic error information.""" 94 | 95 | def __init__(self, document_path, schema_path, code, rule, constraint, value, info): 96 | self.document_path = document_path 97 | """ The path to the field within the document that caused the error. 98 | Type: :class:`tuple` """ 99 | self.schema_path = schema_path 100 | """ The path to the rule within the schema that caused the error. 101 | Type: :class:`tuple` """ 102 | self.code = code 103 | """ The error's identifier code. Type: :class:`int` """ 104 | self.rule = rule 105 | """ The rule that failed. Type: `string` """ 106 | self.constraint = constraint 107 | """ The constraint that failed. """ 108 | self.value = value 109 | """ The value that failed. """ 110 | self.info = info 111 | """ May hold additional information about the error. 112 | Type: :class:`tuple` """ 113 | 114 | def __eq__(self, other): 115 | """Assumes the errors relate to the same document and schema.""" 116 | return hash(self) == hash(other) 117 | 118 | def __hash__(self): 119 | """Expects that all other properties are transitively determined.""" 120 | return hash(self.document_path) ^ hash(self.schema_path) ^ hash(self.code) 121 | 122 | def __lt__(self, other): 123 | if self.document_path != other.document_path: 124 | return compare_paths_lt(self.document_path, other.document_path) 125 | else: 126 | return compare_paths_lt(self.schema_path, other.schema_path) 127 | 128 | def __repr__(self): 129 | return ( 130 | "{class_name} @ {memptr} ( " 131 | "document_path={document_path}," 132 | "schema_path={schema_path}," 133 | "code={code}," 134 | "constraint={constraint}," 135 | "value={value}," 136 | "info={info} )".format( 137 | class_name=self.__class__.__name__, 138 | memptr=hex(id(self)), # noqa: E501 139 | document_path=self.document_path, 140 | schema_path=self.schema_path, 141 | code=hex(self.code), 142 | constraint=quote_string(self.constraint), 143 | value=quote_string(self.value), 144 | info=self.info, 145 | ) 146 | ) 147 | 148 | @property 149 | def child_errors(self): 150 | """ 151 | A list that contains the individual errors of a bulk validation error. 152 | """ 153 | return self.info[0] if self.is_group_error else None 154 | 155 | @property 156 | def definitions_errors(self): 157 | """ 158 | Dictionary with errors of an \\*of-rule mapped to the index of the definition it 159 | occurred in. Returns :obj:`None` if not applicable. 
160 | """ 161 | if not self.is_logic_error: 162 | return None 163 | 164 | result = defaultdict(list) 165 | for error in self.child_errors: 166 | i = error.schema_path[len(self.schema_path)] 167 | result[i].append(error) 168 | return result 169 | 170 | @property 171 | def field(self): 172 | """Field of the contextual mapping, possibly :obj:`None`.""" 173 | if self.document_path: 174 | return self.document_path[-1] 175 | else: 176 | return None 177 | 178 | @property 179 | def is_group_error(self): 180 | """``True`` for errors of bulk validations.""" 181 | return bool(self.code & ERROR_GROUP.code) 182 | 183 | @property 184 | def is_logic_error(self): 185 | """ 186 | ``True`` for validation errors against different schemas with \\*of-rules. 187 | """ 188 | return bool(self.code & LOGICAL.code - ERROR_GROUP.code) 189 | 190 | @property 191 | def is_normalization_error(self): 192 | """``True`` for normalization errors.""" 193 | return bool(self.code & NORMALIZATION.code) 194 | 195 | 196 | class ErrorList(list): 197 | """ 198 | A list for :class:`~cerberus.errors.ValidationError` instances that can be queried 199 | with the ``in`` keyword for a particular :class:`~cerberus.errors.ErrorDefinition`. 200 | """ 201 | 202 | def __contains__(self, error_definition): 203 | if not isinstance(error_definition, ErrorDefinition): 204 | raise TypeError 205 | 206 | wanted_code = error_definition.code 207 | return any(x.code == wanted_code for x in self) 208 | 209 | 210 | class ErrorTreeNode(MutableMapping): 211 | __slots__ = ('descendants', 'errors', 'parent_node', 'path', 'tree_root') 212 | 213 | def __init__(self, path, parent_node): 214 | self.parent_node = parent_node 215 | self.tree_root = self.parent_node.tree_root 216 | self.path = path[: self.parent_node.depth + 1] 217 | self.errors = ErrorList() 218 | self.descendants = {} 219 | 220 | def __contains__(self, item): 221 | if isinstance(item, ErrorDefinition): 222 | return item in self.errors 223 | else: 224 | return item in self.descendants 225 | 226 | def __delitem__(self, key): 227 | del self.descendants[key] 228 | 229 | def __iter__(self): 230 | return iter(self.errors) 231 | 232 | def __getitem__(self, item): 233 | if isinstance(item, ErrorDefinition): 234 | for error in self.errors: 235 | if item.code == error.code: 236 | return error 237 | return None 238 | else: 239 | return self.descendants.get(item) 240 | 241 | def __len__(self): 242 | return len(self.errors) 243 | 244 | def __repr__(self): 245 | return self.__str__() 246 | 247 | def __setitem__(self, key, value): 248 | self.descendants[key] = value 249 | 250 | def __str__(self): 251 | return str(self.errors) + ',' + str(self.descendants) 252 | 253 | @property 254 | def depth(self): 255 | return len(self.path) 256 | 257 | @property 258 | def tree_type(self): 259 | return self.tree_root.tree_type 260 | 261 | def add(self, error): 262 | error_path = self._path_of_(error) 263 | 264 | key = error_path[self.depth] 265 | if key not in self.descendants: 266 | self[key] = ErrorTreeNode(error_path, self) 267 | 268 | node = self[key] 269 | 270 | if len(error_path) == self.depth + 1: 271 | node.errors.append(error) 272 | node.errors.sort() 273 | if error.is_group_error: 274 | for child_error in error.child_errors: 275 | self.tree_root.add(child_error) 276 | else: 277 | node.add(error) 278 | 279 | def _path_of_(self, error): 280 | return getattr(error, self.tree_type + '_path') 281 | 282 | 283 | class ErrorTree(ErrorTreeNode): 284 | """ 285 | Base class for :class:`~cerberus.errors.DocumentErrorTree` and 
286 | :class:`~cerberus.errors.SchemaErrorTree`. 287 | """ 288 | 289 | def __init__(self, errors=()): 290 | self.parent_node = None 291 | self.tree_root = self 292 | self.path = () 293 | self.errors = ErrorList() 294 | self.descendants = {} 295 | for error in errors: 296 | self.add(error) 297 | 298 | def add(self, error): 299 | """ 300 | Add an error to the tree. 301 | 302 | :param error: :class:`~cerberus.errors.ValidationError` 303 | """ 304 | if not self._path_of_(error): 305 | self.errors.append(error) 306 | self.errors.sort() 307 | else: 308 | super(ErrorTree, self).add(error) 309 | 310 | def fetch_errors_from(self, path): 311 | """ 312 | Returns all errors for a particular path. 313 | 314 | :param path: :class:`tuple` of :term:`hashable` s. 315 | :rtype: :class:`~cerberus.errors.ErrorList` 316 | """ 317 | node = self.fetch_node_from(path) 318 | if node is not None: 319 | return node.errors 320 | else: 321 | return ErrorList() 322 | 323 | def fetch_node_from(self, path): 324 | """ 325 | Returns a node for a path. 326 | 327 | :param path: Tuple of :term:`hashable` s. 328 | :rtype: :class:`~cerberus.errors.ErrorTreeNode` or :obj:`None` 329 | """ 330 | context = self 331 | for key in path: 332 | context = context[key] 333 | if context is None: 334 | break 335 | return context 336 | 337 | 338 | class DocumentErrorTree(ErrorTree): 339 | """ 340 | Implements a dict-like class to query errors by indexes following the structure of a 341 | validated document. 342 | """ 343 | 344 | tree_type = 'document' 345 | 346 | 347 | class SchemaErrorTree(ErrorTree): 348 | """ 349 | Implements a dict-like class to query errors by indexes following the structure of 350 | the used schema. 351 | """ 352 | 353 | tree_type = 'schema' 354 | 355 | 356 | class BaseErrorHandler(object): 357 | """Base class for all error handlers. 358 | Subclasses are identified as error-handlers with an instance-test.""" 359 | 360 | def __init__(self, *args, **kwargs): 361 | """Optionally initialize a new instance.""" 362 | pass 363 | 364 | def __call__(self, errors): 365 | """ 366 | Returns errors in a handler-specific format. 367 | 368 | :param errors: An object containing the errors. 369 | :type errors: :term:`iterable` of 370 | :class:`~cerberus.errors.ValidationError` instances or a 371 | :class:`~cerberus.Validator` instance 372 | """ 373 | raise NotImplementedError 374 | 375 | def __iter__(self): 376 | """Be a superhero and implement an iterator over errors.""" 377 | raise NotImplementedError 378 | 379 | def add(self, error): 380 | """ 381 | Add an error to the errors' container object of a handler. 382 | 383 | :param error: The error to add. 384 | :type error: :class:`~cerberus.errors.ValidationError` 385 | """ 386 | raise NotImplementedError 387 | 388 | def emit(self, error): 389 | """ 390 | Optionally emits an error in the handler's format to a stream. Or light a LED, 391 | or even shut down a power plant. 392 | 393 | :param error: The error to emit. 394 | :type error: :class:`~cerberus.errors.ValidationError` 395 | """ 396 | pass 397 | 398 | def end(self, validator): 399 | """ 400 | Gets called when a validation ends. 401 | 402 | :param validator: The calling validator. 403 | :type validator: :class:`~cerberus.Validator` 404 | """ 405 | pass 406 | 407 | def extend(self, errors): 408 | """ 409 | Adds all errors to the handler's container object. 410 | 411 | :param errors: The errors to add. 
412 | :type errors: :term:`iterable` of 413 | :class:`~cerberus.errors.ValidationError` instances 414 | """ 415 | for error in errors: 416 | self.add(error) 417 | 418 | def start(self, validator): 419 | """ 420 | Gets called when a validation starts. 421 | 422 | :param validator: The calling validator. 423 | :type validator: :class:`~cerberus.Validator` 424 | """ 425 | pass 426 | 427 | 428 | class ToyErrorHandler(BaseErrorHandler): 429 | def __call__(self, *args, **kwargs): 430 | raise RuntimeError('This is not supposed to happen.') 431 | 432 | def clear(self): 433 | pass 434 | 435 | 436 | def encode_unicode(f): 437 | """Cerberus error messages expect regular binary strings. 438 | If unicode is used in a ValidationError message can't be printed. 439 | 440 | This decorator ensures that if legacy Python is used unicode 441 | strings are encoded before passing to a function. 442 | """ 443 | 444 | @wraps(f) 445 | def wrapped(obj, error): 446 | def _encode(value): 447 | """Helper encoding unicode strings into binary utf-8""" 448 | if isinstance(value, unicode): # noqa: F821 449 | return value.encode('utf-8') 450 | return value 451 | 452 | error = copy(error) 453 | error.document_path = _encode(error.document_path) 454 | error.schema_path = _encode(error.schema_path) 455 | error.constraint = _encode(error.constraint) 456 | error.value = _encode(error.value) 457 | error.info = _encode(error.info) 458 | return f(obj, error) 459 | 460 | return wrapped if sys.version_info < (3,) else f 461 | 462 | 463 | class BasicErrorHandler(BaseErrorHandler): 464 | """ 465 | Models cerberus' legacy. Returns a :class:`dict`. When mangled through :class:`str` 466 | a pretty-formatted representation of that tree is returned. 467 | """ 468 | 469 | messages = { 470 | 0x00: "{0}", 471 | 0x01: "document is missing", 472 | 0x02: "required field", 473 | 0x03: "unknown field", 474 | 0x04: "field '{0}' is required", 475 | 0x05: "depends on these values: {constraint}", 476 | 0x06: "{0} must not be present with '{field}'", 477 | 0x21: "'{0}' is not a document, must be a dict", 478 | 0x22: "empty values not allowed", 479 | 0x23: "null value not allowed", 480 | 0x24: "must be of {constraint} type", 481 | 0x25: "must be of dict type", 482 | 0x26: "length of list should be {0}, it is {1}", 483 | 0x27: "min length is {constraint}", 484 | 0x28: "max length is {constraint}", 485 | 0x41: "value does not match regex '{constraint}'", 486 | 0x42: "min value is {constraint}", 487 | 0x43: "max value is {constraint}", 488 | 0x44: "unallowed value {value}", 489 | 0x45: "unallowed values {0}", 490 | 0x46: "unallowed value {value}", 491 | 0x47: "unallowed values {0}", 492 | 0x48: "missing members {0}", 493 | 0x61: "field '{field}' cannot be coerced: {0}", 494 | 0x62: "field '{field}' cannot be renamed: {0}", 495 | 0x63: "field is read-only", 496 | 0x64: "default value for '{field}' cannot be set: {0}", 497 | 0x81: "mapping doesn't validate subschema: {0}", 498 | 0x82: "one or more sequence-items don't validate: {0}", 499 | 0x83: "one or more keys of a mapping don't validate: {0}", 500 | 0x84: "one or more values in a mapping don't validate: {0}", 501 | 0x85: "one or more sequence-items don't validate: {0}", 502 | 0x91: "one or more definitions validate", 503 | 0x92: "none or more than one rule validate", 504 | 0x93: "no definitions validate", 505 | 0x94: "one or more definitions don't validate", 506 | } 507 | 508 | def __init__(self, tree=None): 509 | self.tree = {} if tree is None else tree 510 | 511 | def __call__(self, errors): 512 | 
self.clear() 513 | self.extend(errors) 514 | return self.pretty_tree 515 | 516 | def __str__(self): 517 | return pformat(self.pretty_tree) 518 | 519 | @property 520 | def pretty_tree(self): 521 | pretty = deepcopy(self.tree) 522 | for field in pretty: 523 | self._purge_empty_dicts(pretty[field]) 524 | return pretty 525 | 526 | @encode_unicode 527 | def add(self, error): 528 | # Make sure the original error is not altered with 529 | # error paths specific to the handler. 530 | error = deepcopy(error) 531 | 532 | self._rewrite_error_path(error) 533 | 534 | if error.is_logic_error: 535 | self._insert_logic_error(error) 536 | elif error.is_group_error: 537 | self._insert_group_error(error) 538 | elif error.code in self.messages: 539 | self._insert_error( 540 | error.document_path, self._format_message(error.field, error) 541 | ) 542 | 543 | def clear(self): 544 | self.tree = {} 545 | 546 | def start(self, validator): 547 | self.clear() 548 | 549 | def _format_message(self, field, error): 550 | return self.messages[error.code].format( 551 | *error.info, constraint=error.constraint, field=field, value=error.value 552 | ) 553 | 554 | def _insert_error(self, path, node): 555 | """ 556 | Adds an error or sub-tree to :attr:tree. 557 | 558 | :param path: Path to the error. 559 | :type path: Tuple of strings and integers. 560 | :param node: An error message or a sub-tree. 561 | :type node: String or dictionary. 562 | """ 563 | field = path[0] 564 | if len(path) == 1: 565 | if field in self.tree: 566 | subtree = self.tree[field].pop() 567 | self.tree[field] += [node, subtree] 568 | else: 569 | self.tree[field] = [node, {}] 570 | elif len(path) >= 1: 571 | if field not in self.tree: 572 | self.tree[field] = [{}] 573 | subtree = self.tree[field][-1] 574 | 575 | if subtree: 576 | new = self.__class__(tree=copy(subtree)) 577 | else: 578 | new = self.__class__() 579 | new._insert_error(path[1:], node) 580 | subtree.update(new.tree) 581 | 582 | def _insert_group_error(self, error): 583 | for child_error in error.child_errors: 584 | if child_error.is_logic_error: 585 | self._insert_logic_error(child_error) 586 | elif child_error.is_group_error: 587 | self._insert_group_error(child_error) 588 | else: 589 | self._insert_error( 590 | child_error.document_path, 591 | self._format_message(child_error.field, child_error), 592 | ) 593 | 594 | def _insert_logic_error(self, error): 595 | field = error.field 596 | self._insert_error(error.document_path, self._format_message(field, error)) 597 | 598 | for definition_errors in error.definitions_errors.values(): 599 | for child_error in definition_errors: 600 | if child_error.is_logic_error: 601 | self._insert_logic_error(child_error) 602 | elif child_error.is_group_error: 603 | self._insert_group_error(child_error) 604 | else: 605 | self._insert_error( 606 | child_error.document_path, 607 | self._format_message(field, child_error), 608 | ) 609 | 610 | def _purge_empty_dicts(self, error_list): 611 | subtree = error_list[-1] 612 | if not error_list[-1]: 613 | error_list.pop() 614 | else: 615 | for key in subtree: 616 | self._purge_empty_dicts(subtree[key]) 617 | 618 | def _rewrite_error_path(self, error, offset=0): 619 | """ 620 | Recursively rewrites the error path to correctly represent logic errors 621 | """ 622 | if error.is_logic_error: 623 | self._rewrite_logic_error_path(error, offset) 624 | elif error.is_group_error: 625 | self._rewrite_group_error_path(error, offset) 626 | 627 | def _rewrite_group_error_path(self, error, offset=0): 628 | child_start = 
len(error.document_path) - offset 629 | 630 | for child_error in error.child_errors: 631 | relative_path = child_error.document_path[child_start:] 632 | child_error.document_path = error.document_path + relative_path 633 | 634 | self._rewrite_error_path(child_error, offset) 635 | 636 | def _rewrite_logic_error_path(self, error, offset=0): 637 | child_start = len(error.document_path) - offset 638 | 639 | for i, definition_errors in error.definitions_errors.items(): 640 | if not definition_errors: 641 | continue 642 | 643 | nodename = '%s definition %s' % (error.rule, i) 644 | path = error.document_path + (nodename,) 645 | 646 | for child_error in definition_errors: 647 | rel_path = child_error.document_path[child_start:] 648 | child_error.document_path = path + rel_path 649 | 650 | self._rewrite_error_path(child_error, offset + 1) 651 | 652 | 653 | class SchemaErrorHandler(BasicErrorHandler): 654 | messages = BasicErrorHandler.messages.copy() 655 | messages[0x03] = "unknown rule" 656 | -------------------------------------------------------------------------------- /cerberus/platform.py: -------------------------------------------------------------------------------- 1 | """ Platform-dependent objects """ 2 | 3 | import sys 4 | 5 | if sys.flags.optimize == 2: 6 | raise RuntimeError("Cerberus can't be run with Python's optimization level 2.") 7 | 8 | 9 | if sys.version_info < (3,): 10 | _int_types = (int, long) # noqa: F821 11 | _str_type = basestring # noqa: F821 12 | else: 13 | _int_types = (int,) 14 | _str_type = str 15 | 16 | 17 | if sys.version_info < (3, 3): 18 | from collections import ( 19 | Callable, 20 | Container, 21 | Hashable, 22 | Iterable, 23 | Mapping, 24 | MutableMapping, 25 | Sequence, 26 | Set, 27 | Sized, 28 | ) 29 | else: 30 | from collections.abc import ( 31 | Callable, 32 | Container, 33 | Hashable, 34 | Iterable, 35 | Mapping, 36 | MutableMapping, 37 | Sequence, 38 | Set, 39 | Sized, 40 | ) 41 | 42 | if sys.version_info < (3, 8): 43 | import importlib_metadata 44 | else: 45 | import importlib.metadata as importlib_metadata 46 | 47 | 48 | __all__ = ( 49 | "_int_types", 50 | "_str_type", 51 | "importlib_metadata", 52 | Callable.__name__, 53 | Container.__name__, 54 | Hashable.__name__, 55 | Iterable.__name__, 56 | Mapping.__name__, 57 | MutableMapping.__name__, 58 | Sequence.__name__, 59 | Set.__name__, 60 | Sized.__name__, 61 | ) 62 | -------------------------------------------------------------------------------- /cerberus/schema.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from warnings import warn 4 | 5 | from cerberus import errors 6 | from cerberus.platform import ( 7 | _str_type, 8 | Callable, 9 | Hashable, 10 | Mapping, 11 | MutableMapping, 12 | Sequence, 13 | ) 14 | from cerberus.utils import ( 15 | get_Validator_class, 16 | validator_factory, 17 | mapping_hash, 18 | TypeDefinition, 19 | ) 20 | 21 | 22 | class _Abort(Exception): 23 | pass 24 | 25 | 26 | class SchemaError(Exception): 27 | """ 28 | Raised when the validation schema is missing, has the wrong format or contains 29 | errors.""" 30 | 31 | pass 32 | 33 | 34 | class DefinitionSchema(MutableMapping): 35 | """A dict-subclass for caching of validated schemas.""" 36 | 37 | def __new__(cls, *args, **kwargs): 38 | if 'SchemaValidator' not in globals(): 39 | global SchemaValidator 40 | SchemaValidator = validator_factory('SchemaValidator', SchemaValidatorMixin) 41 | types_mapping = SchemaValidator.types_mapping.copy() 
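            # The lazily built SchemaValidator gets two extra types, 'callable' and
            # 'hashable', so that rule definitions holding such constraints can be
            # type-checked when a schema itself is validated.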
42 | types_mapping.update( 43 | { 44 | 'callable': TypeDefinition('callable', (Callable,), ()), 45 | 'hashable': TypeDefinition('hashable', (Hashable,), ()), 46 | } 47 | ) 48 | SchemaValidator.types_mapping = types_mapping 49 | 50 | return super(DefinitionSchema, cls).__new__(cls) 51 | 52 | def __init__(self, validator, schema): 53 | """ 54 | :param validator: An instance of Validator-(sub-)class that uses this 55 | schema. 56 | :param schema: A definition-schema as ``dict``. Defaults to an empty 57 | one. 58 | """ 59 | if not isinstance(validator, get_Validator_class()): 60 | raise RuntimeError('validator argument must be a Validator-' 'instance.') 61 | self.validator = validator 62 | 63 | if isinstance(schema, _str_type): 64 | schema = validator.schema_registry.get(schema, schema) 65 | 66 | if not isinstance(schema, Mapping): 67 | try: 68 | schema = dict(schema) 69 | except Exception: 70 | raise SchemaError(errors.SCHEMA_ERROR_DEFINITION_TYPE.format(schema)) 71 | 72 | self.validation_schema = SchemaValidationSchema(validator) 73 | self.schema_validator = SchemaValidator( 74 | None, 75 | allow_unknown=self.validation_schema, 76 | error_handler=errors.SchemaErrorHandler, 77 | target_schema=schema, 78 | target_validator=validator, 79 | ) 80 | 81 | schema = self.expand(schema) 82 | self.validate(schema) 83 | self.schema = schema 84 | 85 | def __delitem__(self, key): 86 | _new_schema = self.schema.copy() 87 | try: 88 | del _new_schema[key] 89 | except ValueError: 90 | raise SchemaError("Schema has no field '%s' defined" % key) 91 | except Exception as e: 92 | raise e 93 | else: 94 | del self.schema[key] 95 | 96 | def __getitem__(self, item): 97 | return self.schema[item] 98 | 99 | def __iter__(self): 100 | return iter(self.schema) 101 | 102 | def __len__(self): 103 | return len(self.schema) 104 | 105 | def __repr__(self): 106 | return str(self) 107 | 108 | def __setitem__(self, key, value): 109 | value = self.expand({0: value})[0] 110 | self.validate({key: value}) 111 | self.schema[key] = value 112 | 113 | def __str__(self): 114 | if hasattr(self, "schema"): 115 | return str(self.schema) 116 | else: 117 | return "No schema data is set yet." 118 | 119 | def copy(self): 120 | return self.__class__(self.validator, self.schema.copy()) 121 | 122 | @classmethod 123 | def expand(cls, schema): 124 | try: 125 | schema = cls._expand_logical_shortcuts(schema) 126 | schema = cls._expand_subschemas(schema) 127 | except Exception: 128 | pass 129 | 130 | # TODO remove this with the next major release 131 | schema = cls._rename_deprecated_rulenames(schema) 132 | 133 | return schema 134 | 135 | @classmethod 136 | def _expand_logical_shortcuts(cls, schema): 137 | """ 138 | Expand agglutinated rules in a definition-schema. 139 | 140 | :param schema: The schema-definition to expand. 141 | :return: The expanded schema-definition. 
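        For example, ``{'foo': {'anyof_type': ['string', 'integer']}}`` is
        expanded to ``{'foo': {'anyof': [{'type': 'string'}, {'type': 'integer'}]}}``.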
142 | """ 143 | 144 | def is_of_rule(x): 145 | return isinstance(x, _str_type) and x.startswith( 146 | ('allof_', 'anyof_', 'noneof_', 'oneof_') 147 | ) 148 | 149 | for field, rules in schema.items(): 150 | for of_rule in [x for x in rules if is_of_rule(x)]: 151 | operator, rule = of_rule.split('_', 1) 152 | rules.update({operator: []}) 153 | for value in rules[of_rule]: 154 | rules[operator].append({rule: value}) 155 | del rules[of_rule] 156 | return schema 157 | 158 | @classmethod 159 | def _expand_subschemas(cls, schema): 160 | def has_schema_rule(): 161 | return isinstance(schema[field], Mapping) and 'schema' in schema[field] 162 | 163 | def has_mapping_schema(): 164 | """ 165 | Tries to determine heuristically if the schema-constraints are aimed to 166 | mappings. 167 | """ 168 | try: 169 | return all( 170 | isinstance(x, Mapping) for x in schema[field]['schema'].values() 171 | ) 172 | except TypeError: 173 | return False 174 | 175 | for field in schema: 176 | if not has_schema_rule(): 177 | pass 178 | elif has_mapping_schema(): 179 | schema[field]['schema'] = cls.expand(schema[field]['schema']) 180 | else: # assumes schema-constraints for a sequence 181 | schema[field]['schema'] = cls.expand({0: schema[field]['schema']})[0] 182 | 183 | # TODO remove the last two values in the tuple with the next major release 184 | for rule in ('keysrules', 'valuesrules', 'keyschema', 'valueschema'): 185 | if rule in schema[field]: 186 | schema[field][rule] = cls.expand({0: schema[field][rule]})[0] 187 | 188 | for rule in ('allof', 'anyof', 'items', 'noneof', 'oneof'): 189 | if rule in schema[field]: 190 | if not isinstance(schema[field][rule], Sequence): 191 | continue 192 | new_rules_definition = [] 193 | for item in schema[field][rule]: 194 | new_rules_definition.append(cls.expand({0: item})[0]) 195 | schema[field][rule] = new_rules_definition 196 | return schema 197 | 198 | def get(self, item, default=None): 199 | return self.schema.get(item, default) 200 | 201 | def items(self): 202 | return self.schema.items() 203 | 204 | def update(self, schema): 205 | try: 206 | schema = self.expand(schema) 207 | _new_schema = self.schema.copy() 208 | _new_schema.update(schema) 209 | self.validate(_new_schema) 210 | except ValueError: 211 | raise SchemaError(errors.SCHEMA_ERROR_DEFINITION_TYPE.format(schema)) 212 | except Exception as e: 213 | raise e 214 | else: 215 | self.schema = _new_schema 216 | 217 | # TODO remove with next major release 218 | @staticmethod 219 | def _rename_deprecated_rulenames(schema): 220 | for field, rules in schema.items(): 221 | if isinstance(rules, str): # registry reference 222 | continue 223 | 224 | for old, new in ( 225 | ('keyschema', 'keysrules'), 226 | ('validator', 'check_with'), 227 | ('valueschema', 'valuesrules'), 228 | ): 229 | if old not in rules: 230 | continue 231 | 232 | if new in rules: 233 | raise RuntimeError( 234 | "The rule '{new}' is also present with its old " 235 | "name '{old}' in the same set of rules." 236 | ) 237 | 238 | warn( 239 | "The rule '{old}' was renamed to '{new}'. 
The old name will " 240 | "not be available in the next major release of " 241 | "Cerberus.".format(old=old, new=new), 242 | DeprecationWarning, 243 | ) 244 | schema[field][new] = schema[field][old] 245 | schema[field].pop(old) 246 | 247 | return schema 248 | 249 | def regenerate_validation_schema(self): 250 | self.validation_schema = SchemaValidationSchema(self.validator) 251 | 252 | def validate(self, schema=None): 253 | """ 254 | Validates a schema that defines rules against supported rules. 255 | 256 | :param schema: The schema to be validated as a legal cerberus schema 257 | according to the rules of the assigned Validator object. 258 | Raises a :class:`~cerberus.base.SchemaError` when an invalid 259 | schema is encountered. 260 | """ 261 | if schema is None: 262 | schema = self.schema 263 | _hash = (mapping_hash(schema), mapping_hash(self.validator.types_mapping)) 264 | if _hash not in self.validator._valid_schemas: 265 | self._validate(schema) 266 | self.validator._valid_schemas.add(_hash) 267 | 268 | def _validate(self, schema): 269 | if isinstance(schema, _str_type): 270 | schema = self.validator.schema_registry.get(schema, schema) 271 | 272 | test_schema = {} 273 | for field, rules in schema.items(): 274 | if isinstance(rules, _str_type): 275 | test_schema[field] = rules_set_registry.get(rules, rules) 276 | else: 277 | test_rules = {} 278 | for rule, constraint in rules.items(): 279 | test_rules[rule.replace(" ", "_")] = constraint 280 | test_schema[field] = test_rules 281 | 282 | if not self.schema_validator(test_schema, normalize=False): 283 | raise SchemaError(self.schema_validator.errors) 284 | 285 | 286 | class UnvalidatedSchema(DefinitionSchema): 287 | def __init__(self, schema={}): 288 | if not isinstance(schema, Mapping): 289 | schema = dict(schema) 290 | self.schema = schema 291 | 292 | def validate(self, schema): 293 | pass 294 | 295 | def copy(self): 296 | # Override ancestor's copy, because 297 | # UnvalidatedSchema does not have .validator: 298 | return self.__class__(self.schema.copy()) 299 | 300 | 301 | class SchemaValidationSchema(UnvalidatedSchema): 302 | def __init__(self, validator): 303 | self.schema = { 304 | 'allow_unknown': False, 305 | 'schema': validator.rules, 306 | 'type': 'dict', 307 | } 308 | 309 | 310 | class SchemaValidatorMixin(object): 311 | """ 312 | This validator mixin provides mechanics to validate schemas passed to a Cerberus 313 | validator. 
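
    Rule constraints in a target schema are checked against the rules that the
    target validator itself exposes (``target_validator.rules``).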
314 | """ 315 | 316 | def __init__(self, *args, **kwargs): 317 | kwargs.setdefault('known_rules_set_refs', set()) 318 | kwargs.setdefault('known_schema_refs', set()) 319 | super(SchemaValidatorMixin, self).__init__(*args, **kwargs) 320 | 321 | @property 322 | def known_rules_set_refs(self): 323 | """The encountered references to rules set registry items.""" 324 | return self._config['known_rules_set_refs'] 325 | 326 | @property 327 | def known_schema_refs(self): 328 | """The encountered references to schema registry items.""" 329 | return self._config['known_schema_refs'] 330 | 331 | @property 332 | def target_schema(self): 333 | """The schema that is being validated.""" 334 | return self._config['target_schema'] 335 | 336 | @property 337 | def target_validator(self): 338 | """The validator whose schema is being validated.""" 339 | return self._config['target_validator'] 340 | 341 | def _check_with_bulk_schema(self, field, value): 342 | # resolve schema registry reference 343 | if isinstance(value, _str_type): 344 | if value in self.known_rules_set_refs: 345 | return 346 | else: 347 | self.known_rules_set_refs.add(value) 348 | definition = self.target_validator.rules_set_registry.get(value) 349 | if definition is None: 350 | self._error(field, 'Rules set definition %s not found.' % value) 351 | return 352 | else: 353 | value = definition 354 | 355 | _hash = ( 356 | mapping_hash({'turing': value}), 357 | mapping_hash(self.target_validator.types_mapping), 358 | ) 359 | if _hash in self.target_validator._valid_schemas: 360 | return 361 | 362 | validator = self._get_child_validator( 363 | document_crumb=field, 364 | allow_unknown=False, 365 | schema=self.target_validator.rules, 366 | ) 367 | validator(value, normalize=False) 368 | if validator._errors: 369 | self._error(validator._errors) 370 | else: 371 | self.target_validator._valid_schemas.add(_hash) 372 | 373 | def _check_with_dependencies(self, field, value): 374 | if isinstance(value, _str_type): 375 | pass 376 | elif isinstance(value, Mapping): 377 | validator = self._get_child_validator( 378 | document_crumb=field, 379 | schema={'valuesrules': {'type': 'list'}}, 380 | allow_unknown=True, 381 | ) 382 | if not validator(value, normalize=False): 383 | self._error(validator._errors) 384 | elif isinstance(value, Sequence): 385 | if not all(isinstance(x, Hashable) for x in value): 386 | path = self.document_path + (field,) 387 | self._error(path, 'All dependencies must be a hashable type.') 388 | 389 | def _check_with_items(self, field, value): 390 | for i, schema in enumerate(value): 391 | self._check_with_bulk_schema((field, i), schema) 392 | 393 | def _check_with_schema(self, field, value): 394 | try: 395 | value = self._handle_schema_reference_for_validator(field, value) 396 | except _Abort: 397 | return 398 | 399 | _hash = (mapping_hash(value), mapping_hash(self.target_validator.types_mapping)) 400 | if _hash in self.target_validator._valid_schemas: 401 | return 402 | 403 | validator = self._get_child_validator( 404 | document_crumb=field, schema=None, allow_unknown=self.root_allow_unknown 405 | ) 406 | validator(self._expand_rules_set_refs(value), normalize=False) 407 | if validator._errors: 408 | self._error(validator._errors) 409 | else: 410 | self.target_validator._valid_schemas.add(_hash) 411 | 412 | def _check_with_type(self, field, value): 413 | value = set((value,)) if isinstance(value, _str_type) else set(value) 414 | invalid_constraints = value - set(self.target_validator.types) 415 | if invalid_constraints: 416 | self._error( 
417 | field, 'Unsupported types: {}'.format(', '.join(invalid_constraints)) 418 | ) 419 | 420 | def _expand_rules_set_refs(self, schema): 421 | result = {} 422 | for k, v in schema.items(): 423 | if isinstance(v, _str_type): 424 | result[k] = self.target_validator.rules_set_registry.get(v) 425 | else: 426 | result[k] = v 427 | return result 428 | 429 | def _handle_schema_reference_for_validator(self, field, value): 430 | if not isinstance(value, _str_type): 431 | return value 432 | if value in self.known_schema_refs: 433 | raise _Abort 434 | 435 | self.known_schema_refs.add(value) 436 | definition = self.target_validator.schema_registry.get(value) 437 | if definition is None: 438 | path = self.document_path + (field,) 439 | self._error(path, 'Schema definition {} not found.'.format(value)) 440 | raise _Abort 441 | return definition 442 | 443 | def _validate_logical(self, rule, field, value): 444 | """{'allowed': ('allof', 'anyof', 'noneof', 'oneof')}""" 445 | if not isinstance(value, Sequence): 446 | self._error(field, errors.BAD_TYPE) 447 | return 448 | 449 | validator = self._get_child_validator( 450 | document_crumb=rule, 451 | allow_unknown=False, 452 | schema=self.target_validator.validation_rules, 453 | ) 454 | 455 | for constraints in value: 456 | _hash = ( 457 | mapping_hash({'turing': constraints}), 458 | mapping_hash(self.target_validator.types_mapping), 459 | ) 460 | if _hash in self.target_validator._valid_schemas: 461 | continue 462 | 463 | validator(constraints, normalize=False) 464 | if validator._errors: 465 | self._error(validator._errors) 466 | else: 467 | self.target_validator._valid_schemas.add(_hash) 468 | 469 | 470 | #### 471 | 472 | 473 | class Registry(object): 474 | """ 475 | A registry to store and retrieve schemas and parts of it by a name that can be used 476 | in validation schemas. 477 | 478 | :param definitions: Optional, initial definitions. 479 | :type definitions: any :term:`mapping` 480 | """ 481 | 482 | def __init__(self, definitions={}): 483 | self._storage = {} 484 | self.extend(definitions) 485 | 486 | def add(self, name, definition): 487 | """ 488 | Register a definition to the registry. Existing definitions are replaced 489 | silently. 490 | 491 | :param name: The name which can be used as reference in a validation 492 | schema. 493 | :type name: :class:`str` 494 | :param definition: The definition. 495 | :type definition: any :term:`mapping` 496 | """ 497 | self._storage[name] = self._expand_definition(definition) 498 | 499 | def all(self): 500 | """ 501 | Returns a :class:`dict` with all registered definitions mapped to their name. 502 | """ 503 | return self._storage 504 | 505 | def clear(self): 506 | """Purge all definitions in the registry.""" 507 | self._storage.clear() 508 | 509 | def extend(self, definitions): 510 | """ 511 | Add several definitions at once. Existing definitions are 512 | replaced silently. 513 | 514 | :param definitions: The names and definitions. 515 | :type definitions: a :term:`mapping` or an :term:`iterable` with 516 | two-value :class:`tuple` s 517 | """ 518 | for name, definition in dict(definitions).items(): 519 | self.add(name, definition) 520 | 521 | def get(self, name, default=None): 522 | """ 523 | Retrieve a definition from the registry. 524 | 525 | :param name: The reference that points to the definition. 526 | :type name: :class:`str` 527 | :param default: Return value if the reference isn't registered. 
528 | """ 529 | return self._storage.get(name, default) 530 | 531 | def remove(self, *names): 532 | """ 533 | Unregister definitions from the registry. 534 | 535 | :param names: The names of the definitions that are to be 536 | unregistered. 537 | """ 538 | for name in names: 539 | self._storage.pop(name, None) 540 | 541 | 542 | class SchemaRegistry(Registry): 543 | @classmethod 544 | def _expand_definition(cls, definition): 545 | return DefinitionSchema.expand(definition) 546 | 547 | 548 | class RulesSetRegistry(Registry): 549 | @classmethod 550 | def _expand_definition(cls, definition): 551 | return DefinitionSchema.expand({0: definition})[0] 552 | 553 | 554 | schema_registry, rules_set_registry = SchemaRegistry(), RulesSetRegistry() 555 | -------------------------------------------------------------------------------- /cerberus/tests/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import re 4 | 5 | import pytest 6 | 7 | from cerberus import errors, Validator, SchemaError, DocumentError 8 | from cerberus.tests.conftest import sample_schema 9 | 10 | 11 | def assert_exception(exception, document={}, schema=None, validator=None, msg=None): 12 | """ 13 | Tests whether a specific exception is raised. Optionally also tests whether the 14 | exception message is as expected. 15 | """ 16 | if validator is None: 17 | validator = Validator() 18 | if msg is None: 19 | with pytest.raises(exception): 20 | validator(document, schema) 21 | else: 22 | with pytest.raises(exception, match=re.escape(msg)): 23 | validator(document, schema) 24 | 25 | 26 | def assert_schema_error(*args): 27 | """Tests whether a validation raises an exception due to a malformed schema.""" 28 | assert_exception(SchemaError, *args) 29 | 30 | 31 | def assert_document_error(*args): 32 | """Tests whether a validation raises an exception due to a malformed document.""" 33 | assert_exception(DocumentError, *args) 34 | 35 | 36 | def assert_fail( 37 | document, 38 | schema=None, 39 | validator=None, 40 | update=False, 41 | error=None, 42 | errors=None, 43 | child_errors=None, 44 | ): 45 | """Tests whether a validation fails.""" 46 | if validator is None: 47 | validator = Validator(sample_schema) 48 | result = validator(document, schema, update) 49 | assert isinstance(result, bool) 50 | assert not result 51 | 52 | actual_errors = validator._errors 53 | 54 | assert not (error is not None and errors is not None) 55 | assert not (errors is not None and child_errors is not None), ( 56 | 'child_errors can only be tested in ' 'conjunction with the error parameter' 57 | ) 58 | assert not (child_errors is not None and error is None) 59 | if error is not None: 60 | assert len(actual_errors) == 1 61 | assert_has_error(actual_errors, *error) 62 | 63 | if child_errors is not None: 64 | assert len(actual_errors[0].child_errors) == len(child_errors) 65 | assert_has_errors(actual_errors[0].child_errors, child_errors) 66 | 67 | elif errors is not None: 68 | assert len(actual_errors) == len(errors) 69 | assert_has_errors(actual_errors, errors) 70 | 71 | return actual_errors 72 | 73 | 74 | def assert_success(document, schema=None, validator=None, update=False): 75 | """Tests whether a validation succeeds.""" 76 | if validator is None: 77 | validator = Validator(sample_schema) 78 | result = validator(document, schema, update) 79 | assert isinstance(result, bool) 80 | if not result: 81 | raise AssertionError(validator.errors) 82 | 83 | 84 | def assert_has_error(_errors, 
d_path, s_path, error_def, constraint, info=()): 85 | if not isinstance(d_path, tuple): 86 | d_path = (d_path,) 87 | if not isinstance(info, tuple): 88 | info = (info,) 89 | 90 | assert isinstance(_errors, errors.ErrorList) 91 | 92 | for i, error in enumerate(_errors): 93 | assert isinstance(error, errors.ValidationError) 94 | try: 95 | assert error.document_path == d_path 96 | assert error.schema_path == s_path 97 | assert error.code == error_def.code 98 | assert error.rule == error_def.rule 99 | assert error.constraint == constraint 100 | if not error.is_group_error: 101 | assert error.info == info 102 | except AssertionError: 103 | pass 104 | except Exception: 105 | raise 106 | else: 107 | break 108 | else: 109 | raise AssertionError( 110 | """ 111 | Error with properties: 112 | document_path={doc_path} 113 | schema_path={schema_path} 114 | code={code} 115 | constraint={constraint} 116 | info={info} 117 | not found in errors: 118 | {errors} 119 | """.format( 120 | doc_path=d_path, 121 | schema_path=s_path, 122 | code=hex(error.code), 123 | info=info, 124 | constraint=constraint, 125 | errors=_errors, 126 | ) 127 | ) 128 | return i 129 | 130 | 131 | def assert_has_errors(_errors, _exp_errors): 132 | assert isinstance(_exp_errors, list) 133 | for error in _exp_errors: 134 | assert isinstance(error, tuple) 135 | assert_has_error(_errors, *error) 136 | 137 | 138 | def assert_not_has_error(_errors, *args, **kwargs): 139 | try: 140 | assert_has_error(_errors, *args, **kwargs) 141 | except AssertionError: 142 | pass 143 | except Exception as e: 144 | raise e 145 | else: 146 | raise AssertionError('An unexpected error occurred.') 147 | 148 | 149 | def assert_bad_type(field, data_type, value): 150 | assert_fail( 151 | {field: value}, error=(field, (field, 'type'), errors.BAD_TYPE, data_type) 152 | ) 153 | 154 | 155 | def assert_normalized(document, expected, schema=None, validator=None): 156 | if validator is None: 157 | validator = Validator(sample_schema) 158 | assert_success(document, schema, validator) 159 | assert validator.document == expected 160 | -------------------------------------------------------------------------------- /cerberus/tests/conftest.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from copy import deepcopy 4 | 5 | import pytest 6 | 7 | from cerberus import Validator 8 | 9 | 10 | @pytest.fixture 11 | def document(): 12 | return deepcopy(sample_document) 13 | 14 | 15 | @pytest.fixture 16 | def schema(): 17 | return deepcopy(sample_schema) 18 | 19 | 20 | @pytest.fixture 21 | def validator(): 22 | return Validator(sample_schema) 23 | 24 | 25 | sample_schema = { 26 | 'a_string': {'type': 'string', 'minlength': 2, 'maxlength': 10}, 27 | 'a_binary': {'type': 'binary', 'minlength': 2, 'maxlength': 10}, 28 | 'a_nullable_integer': {'type': 'integer', 'nullable': True}, 29 | 'an_integer': {'type': 'integer', 'min': 1, 'max': 100}, 30 | 'a_restricted_integer': {'type': 'integer', 'allowed': [-1, 0, 1]}, 31 | 'a_boolean': {'type': 'boolean', 'meta': 'can haz two distinct states'}, 32 | 'a_datetime': {'type': 'datetime', 'meta': {'format': '%a, %d. 
%b %Y'}}, 33 | 'a_float': {'type': 'float', 'min': 1, 'max': 100}, 34 | 'a_number': {'type': 'number', 'min': 1, 'max': 100}, 35 | 'a_set': {'type': 'set'}, 36 | 'one_or_more_strings': {'type': ['string', 'list'], 'schema': {'type': 'string'}}, 37 | 'a_regex_email': { 38 | 'type': 'string', 39 | 'regex': r'^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$', 40 | }, 41 | 'a_readonly_string': {'type': 'string', 'readonly': True}, 42 | 'a_restricted_string': {'type': 'string', 'allowed': ['agent', 'client', 'vendor']}, 43 | 'an_array': {'type': 'list', 'allowed': ['agent', 'client', 'vendor']}, 44 | 'an_array_from_set': { 45 | 'type': 'list', 46 | 'allowed': set(['agent', 'client', 'vendor']), 47 | }, 48 | 'a_list_of_dicts': { 49 | 'type': 'list', 50 | 'schema': { 51 | 'type': 'dict', 52 | 'schema': { 53 | 'sku': {'type': 'string'}, 54 | 'price': {'type': 'integer', 'required': True}, 55 | }, 56 | }, 57 | }, 58 | 'a_list_of_values': { 59 | 'type': 'list', 60 | 'items': [{'type': 'string'}, {'type': 'integer'}], 61 | }, 62 | 'a_list_of_integers': {'type': 'list', 'schema': {'type': 'integer'}}, 63 | 'a_dict': { 64 | 'type': 'dict', 65 | 'schema': { 66 | 'address': {'type': 'string'}, 67 | 'city': {'type': 'string', 'required': True}, 68 | }, 69 | }, 70 | 'a_dict_with_valuesrules': {'type': 'dict', 'valuesrules': {'type': 'integer'}}, 71 | 'a_list_length': { 72 | 'type': 'list', 73 | 'schema': {'type': 'integer'}, 74 | 'minlength': 2, 75 | 'maxlength': 5, 76 | }, 77 | 'a_nullable_field_without_type': {'nullable': True}, 78 | 'a_not_nullable_field_without_type': {}, 79 | } 80 | 81 | sample_document = {'name': 'john doe'} 82 | -------------------------------------------------------------------------------- /cerberus/tests/test_assorted.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import sys 4 | from decimal import Decimal 5 | 6 | from pytest import mark 7 | 8 | from cerberus import TypeDefinition, Validator 9 | from cerberus.tests import assert_fail, assert_success 10 | from cerberus.utils import validator_factory 11 | from cerberus.validator import BareValidator 12 | from cerberus.platform import importlib_metadata 13 | 14 | 15 | if (3,) < sys.version_info < (3, 4): 16 | from imp import reload 17 | elif sys.version_info >= (3, 4): 18 | from importlib import reload 19 | else: 20 | pass # Python 2.x 21 | 22 | 23 | def test_pkgresources_version(monkeypatch): 24 | def return_fake_version(name): 25 | assert name == "Cerberus" 26 | return "1.2.3" 27 | 28 | with monkeypatch.context() as m: 29 | cerberus = __import__("cerberus") 30 | m.setattr("cerberus.importlib_metadata.version", return_fake_version) 31 | reload(cerberus) 32 | assert cerberus.__version__ == "1.2.3" 33 | 34 | 35 | def test_version_not_found(monkeypatch): 36 | def raise_package_not_found_error(name): 37 | assert name == "Cerberus" 38 | raise importlib_metadata.PackageNotFoundError 39 | 40 | with monkeypatch.context() as m: 41 | cerberus = __import__("cerberus") 42 | m.setattr("cerberus.importlib_metadata.version", raise_package_not_found_error) 43 | reload(cerberus) 44 | assert cerberus.__version__ == "unknown" 45 | 46 | 47 | def test_clear_cache(validator): 48 | assert len(validator._valid_schemas) > 0 49 | validator.clear_caches() 50 | assert len(validator._valid_schemas) == 0 51 | 52 | 53 | def test_docstring(validator): 54 | assert validator.__doc__ 55 | 56 | 57 | # Test that testing with the sample schema works as expected 58 | # as there might be rules with 
side-effects in it 59 | 60 | 61 | @mark.parametrize( 62 | "test,document", 63 | ((assert_fail, {"an_integer": 60}), (assert_success, {"an_integer": 110})), 64 | ) 65 | def test_that_test_fails(test, document): 66 | try: 67 | test(document) 68 | except AssertionError: 69 | pass 70 | else: 71 | raise AssertionError("test didn't fail") 72 | 73 | 74 | def test_dynamic_types(): 75 | decimal_type = TypeDefinition("decimal", (Decimal,), ()) 76 | document = {"measurement": Decimal(0)} 77 | schema = {"measurement": {"type": "decimal"}} 78 | 79 | validator = Validator() 80 | validator.types_mapping["decimal"] = decimal_type 81 | assert_success(document, schema, validator) 82 | 83 | class MyValidator(Validator): 84 | types_mapping = Validator.types_mapping.copy() 85 | types_mapping["decimal"] = decimal_type 86 | 87 | validator = MyValidator() 88 | assert_success(document, schema, validator) 89 | 90 | 91 | def test_mro(): 92 | assert Validator.__mro__ == (Validator, BareValidator, object), Validator.__mro__ 93 | 94 | 95 | def test_mixin_init(): 96 | class Mixin(object): 97 | def __init__(self, *args, **kwargs): 98 | kwargs["test"] = True 99 | super(Mixin, self).__init__(*args, **kwargs) 100 | 101 | MyValidator = validator_factory("MyValidator", Mixin) 102 | validator = MyValidator() 103 | assert validator._config["test"] 104 | 105 | 106 | def test_sub_init(): 107 | class MyValidator(Validator): 108 | def __init__(self, *args, **kwargs): 109 | kwargs["test"] = True 110 | super(MyValidator, self).__init__(*args, **kwargs) 111 | 112 | validator = MyValidator() 113 | assert validator._config["test"] 114 | -------------------------------------------------------------------------------- /cerberus/tests/test_customization.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import pytest 4 | 5 | import cerberus 6 | from cerberus.tests import assert_fail, assert_success 7 | from cerberus.tests.conftest import sample_schema 8 | 9 | 10 | def test_contextual_data_preservation(): 11 | with pytest.deprecated_call(): 12 | 13 | class InheritedValidator(cerberus.Validator): 14 | def __init__(self, *args, **kwargs): 15 | if 'working_dir' in kwargs: 16 | self.working_dir = kwargs['working_dir'] 17 | super(InheritedValidator, self).__init__(*args, **kwargs) 18 | 19 | def _validate_type_test(self, value): 20 | if self.working_dir: 21 | return True 22 | 23 | assert 'test' in InheritedValidator.types 24 | v = InheritedValidator( 25 | {'test': {'type': 'list', 'schema': {'type': 'test'}}}, working_dir='/tmp' 26 | ) 27 | assert_success({'test': ['foo']}, validator=v) 28 | 29 | 30 | def test_docstring_parsing(): 31 | class CustomValidator(cerberus.Validator): 32 | def _validate_foo(self, argument, field, value): 33 | """{'type': 'zap'}""" 34 | pass 35 | 36 | def _validate_bar(self, value): 37 | """ 38 | Test the barreness of a value. 
39 | 40 | The rule's arguments are validated against this schema: 41 | {'type': 'boolean'} 42 | """ 43 | pass 44 | 45 | assert 'foo' in CustomValidator.validation_rules 46 | assert 'bar' in CustomValidator.validation_rules 47 | 48 | 49 | # TODO remove 'validator' as rule parameter with the next major release 50 | @pytest.mark.parametrize('rule', ('check_with', 'validator')) 51 | def test_check_with_method(rule): 52 | # https://github.com/pyeve/cerberus/issues/265 53 | class MyValidator(cerberus.Validator): 54 | def _check_with_oddity(self, field, value): 55 | if not value & 1: 56 | self._error(field, "Must be an odd number") 57 | 58 | if rule == "validator": 59 | with pytest.deprecated_call(): 60 | v = MyValidator(schema={'amount': {rule: 'oddity'}}) 61 | else: 62 | v = MyValidator(schema={'amount': {rule: 'oddity'}}) 63 | 64 | assert_success(document={'amount': 1}, validator=v) 65 | assert_fail( 66 | document={'amount': 2}, 67 | validator=v, 68 | error=('amount', (), cerberus.errors.CUSTOM, None, ('Must be an odd number',)), 69 | ) 70 | 71 | 72 | # TODO remove test with the next major release 73 | @pytest.mark.parametrize('rule', ('check_with', 'validator')) 74 | def test_validator_method(rule): 75 | class MyValidator(cerberus.Validator): 76 | def _validator_oddity(self, field, value): 77 | if not value & 1: 78 | self._error(field, "Must not be an odd number") 79 | 80 | if rule == "validator": 81 | with pytest.deprecated_call(): 82 | v = MyValidator(schema={'amount': {rule: 'oddity'}}) 83 | else: 84 | v = MyValidator(schema={'amount': {rule: 'oddity'}}) 85 | 86 | with pytest.deprecated_call(): 87 | assert_success(document={'amount': 1}, validator=v) 88 | assert_fail( 89 | document={'amount': 2}, 90 | validator=v, 91 | error=( 92 | 'amount', 93 | (), 94 | cerberus.errors.CUSTOM, 95 | None, 96 | ('Must not be an odd number',), 97 | ), 98 | ) 99 | 100 | 101 | def test_schema_validation_can_be_disabled_in_schema_setter(): 102 | class NonvalidatingValidator(cerberus.Validator): 103 | """ 104 | Skips schema validation to speed up initialization 105 | """ 106 | 107 | @cerberus.Validator.schema.setter 108 | def schema(self, schema): 109 | if schema is None: 110 | self._schema = None 111 | elif self.is_child: 112 | self._schema = schema 113 | elif isinstance(schema, cerberus.schema.DefinitionSchema): 114 | self._schema = schema 115 | else: 116 | self._schema = cerberus.schema.UnvalidatedSchema(schema) 117 | 118 | v = NonvalidatingValidator(schema=sample_schema) 119 | assert v.validate(document={'an_integer': 1}) 120 | assert not v.validate(document={'an_integer': 'a'}) 121 | -------------------------------------------------------------------------------- /cerberus/tests/test_errors.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from cerberus import Validator, errors 4 | from cerberus.tests import assert_fail 5 | 6 | 7 | ValidationError = errors.ValidationError 8 | 9 | 10 | def test__error_1(): 11 | v = Validator(schema={'foo': {'type': 'string'}}) 12 | v.document = {'foo': 42} 13 | v._error('foo', errors.BAD_TYPE, 'string') 14 | error = v._errors[0] 15 | assert error.document_path == ('foo',) 16 | assert error.schema_path == ('foo', 'type') 17 | assert error.code == 0x24 18 | assert error.rule == 'type' 19 | assert error.constraint == 'string' 20 | assert error.value == 42 21 | assert error.info == ('string',) 22 | assert not error.is_group_error 23 | assert not error.is_logic_error 24 | 25 | 26 | def test__error_2(): 27 | v 
= Validator(schema={'foo': {'keysrules': {'type': 'integer'}}}) 28 | v.document = {'foo': {'0': 'bar'}} 29 | v._error('foo', errors.KEYSRULES, ()) 30 | error = v._errors[0] 31 | assert error.document_path == ('foo',) 32 | assert error.schema_path == ('foo', 'keysrules') 33 | assert error.code == 0x83 34 | assert error.rule == 'keysrules' 35 | assert error.constraint == {'type': 'integer'} 36 | assert error.value == {'0': 'bar'} 37 | assert error.info == ((),) 38 | assert error.is_group_error 39 | assert not error.is_logic_error 40 | 41 | 42 | def test__error_3(): 43 | valids = [ 44 | {'type': 'string', 'regex': '0x[0-9a-f]{2}'}, 45 | {'type': 'integer', 'min': 0, 'max': 255}, 46 | ] 47 | v = Validator(schema={'foo': {'oneof': valids}}) 48 | v.document = {'foo': '0x100'} 49 | v._error('foo', errors.ONEOF, (), 0, 2) 50 | error = v._errors[0] 51 | assert error.document_path == ('foo',) 52 | assert error.schema_path == ('foo', 'oneof') 53 | assert error.code == 0x92 54 | assert error.rule == 'oneof' 55 | assert error.constraint == valids 56 | assert error.value == '0x100' 57 | assert error.info == ((), 0, 2) 58 | assert error.is_group_error 59 | assert error.is_logic_error 60 | 61 | 62 | def test_error_tree_from_subschema(validator): 63 | schema = {'foo': {'schema': {'bar': {'type': 'string'}}}} 64 | document = {'foo': {'bar': 0}} 65 | assert_fail(document, schema, validator=validator) 66 | d_error_tree = validator.document_error_tree 67 | s_error_tree = validator.schema_error_tree 68 | 69 | assert 'foo' in d_error_tree 70 | 71 | assert len(d_error_tree['foo'].errors) == 1, d_error_tree['foo'] 72 | assert d_error_tree['foo'].errors[0].code == errors.MAPPING_SCHEMA.code 73 | assert 'bar' in d_error_tree['foo'] 74 | assert d_error_tree['foo']['bar'].errors[0].value == 0 75 | assert d_error_tree.fetch_errors_from(('foo', 'bar'))[0].value == 0 76 | 77 | assert 'foo' in s_error_tree 78 | assert 'schema' in s_error_tree['foo'] 79 | assert 'bar' in s_error_tree['foo']['schema'] 80 | assert 'type' in s_error_tree['foo']['schema']['bar'] 81 | assert s_error_tree['foo']['schema']['bar']['type'].errors[0].value == 0 82 | assert ( 83 | s_error_tree.fetch_errors_from(('foo', 'schema', 'bar', 'type'))[0].value == 0 84 | ) 85 | 86 | 87 | def test_error_tree_from_anyof(validator): 88 | schema = {'foo': {'anyof': [{'type': 'string'}, {'type': 'integer'}]}} 89 | document = {'foo': []} 90 | assert_fail(document, schema, validator=validator) 91 | d_error_tree = validator.document_error_tree 92 | s_error_tree = validator.schema_error_tree 93 | assert 'foo' in d_error_tree 94 | assert d_error_tree['foo'].errors[0].value == [] 95 | assert 'foo' in s_error_tree 96 | assert 'anyof' in s_error_tree['foo'] 97 | assert 0 in s_error_tree['foo']['anyof'] 98 | assert 1 in s_error_tree['foo']['anyof'] 99 | assert 'type' in s_error_tree['foo']['anyof'][0] 100 | assert s_error_tree['foo']['anyof'][0]['type'].errors[0].value == [] 101 | 102 | 103 | def test_nested_error_paths(validator): 104 | # interpreters of the same version on some platforms showed different sort results 105 | # over various runs: 106 | def assert_has_all_errors(errors, *ref_errs): 107 | for ref_err in ref_errs: 108 | for error in errors: 109 | if error == ref_err: 110 | break 111 | else: 112 | raise AssertionError 113 | 114 | schema = { 115 | 'a_dict': { 116 | 'keysrules': {'type': 'integer'}, 117 | 'valuesrules': {'regex': '[a-z]*'}, 118 | }, 119 | 'a_list': {'schema': {'type': 'string', 'oneof_regex': ['[a-z]*$', '[A-Z]*']}}, 120 | } 121 | document = { 
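        # string keys trip 'keysrules' (integer type); values containing
        # uppercase letters trip 'valuesrules' (regex); see assertions below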
122 | 'a_dict': {0: 'abc', 'one': 'abc', 2: 'aBc', 'three': 'abC'}, 123 | 'a_list': [0, 'abc', 'abC'], 124 | } 125 | assert_fail(document, schema, validator=validator) 126 | 127 | det = validator.document_error_tree 128 | set = validator.schema_error_tree 129 | 130 | assert len(det.errors) == 0 131 | assert len(set.errors) == 0 132 | 133 | assert len(det['a_dict'].errors) == 2 134 | assert len(set['a_dict'].errors) == 0 135 | 136 | assert det['a_dict'][0] is None 137 | assert len(det['a_dict']['one'].errors) == 1 138 | assert len(det['a_dict'][2].errors) == 1 139 | assert len(det['a_dict']['three'].errors) == 2 140 | 141 | assert len(set['a_dict']['keysrules'].errors) == 1 142 | assert len(set['a_dict']['valuesrules'].errors) == 1 143 | 144 | assert len(set['a_dict']['keysrules']['type'].errors) == 2 145 | assert len(set['a_dict']['valuesrules']['regex'].errors) == 2 146 | 147 | ref_err1 = ValidationError( 148 | ('a_dict', 'one'), 149 | ('a_dict', 'keysrules', 'type'), 150 | errors.BAD_TYPE.code, 151 | 'type', 152 | 'integer', 153 | 'one', 154 | (), 155 | ) 156 | 157 | ref_err2 = ValidationError( 158 | ('a_dict', 2), 159 | ('a_dict', 'valuesrules', 'regex'), 160 | errors.REGEX_MISMATCH.code, 161 | 'regex', 162 | '[a-z]*$', 163 | 'aBc', 164 | (), 165 | ) 166 | 167 | ref_err3 = ValidationError( 168 | ('a_dict', 'three'), 169 | ('a_dict', 'keysrules', 'type'), 170 | errors.BAD_TYPE.code, 171 | 'type', 172 | 'integer', 173 | 'three', 174 | (), 175 | ) 176 | ref_err4 = ValidationError( 177 | ('a_dict', 'three'), 178 | ('a_dict', 'valuesrules', 'regex'), 179 | errors.REGEX_MISMATCH.code, 180 | 'regex', 181 | '[a-z]*$', 182 | 'abC', 183 | (), 184 | ) 185 | assert det['a_dict'][2].errors[0] == ref_err2 186 | assert det['a_dict']['one'].errors[0] == ref_err1 187 | assert_has_all_errors(det['a_dict']['three'].errors, ref_err3, ref_err4) 188 | assert_has_all_errors(set['a_dict']['keysrules']['type'].errors, ref_err1, ref_err3) 189 | assert_has_all_errors( 190 | set['a_dict']['valuesrules']['regex'].errors, ref_err2, ref_err4 191 | ) 192 | 193 | assert len(det['a_list'].errors) == 1 194 | assert len(det['a_list'][0].errors) == 1 195 | assert det['a_list'][1] is None 196 | assert len(det['a_list'][2].errors) == 3 197 | assert len(set['a_list'].errors) == 0 198 | assert len(set['a_list']['schema'].errors) == 1 199 | assert len(set['a_list']['schema']['type'].errors) == 1 200 | assert len(set['a_list']['schema']['oneof'][0]['regex'].errors) == 1 201 | assert len(set['a_list']['schema']['oneof'][1]['regex'].errors) == 1 202 | 203 | ref_err5 = ValidationError( 204 | ('a_list', 0), 205 | ('a_list', 'schema', 'type'), 206 | errors.BAD_TYPE.code, 207 | 'type', 208 | 'string', 209 | 0, 210 | (), 211 | ) 212 | ref_err6 = ValidationError( 213 | ('a_list', 2), 214 | ('a_list', 'schema', 'oneof'), 215 | errors.ONEOF.code, 216 | 'oneof', 217 | 'irrelevant_at_this_point', 218 | 'abC', 219 | (), 220 | ) 221 | ref_err7 = ValidationError( 222 | ('a_list', 2), 223 | ('a_list', 'schema', 'oneof', 0, 'regex'), 224 | errors.REGEX_MISMATCH.code, 225 | 'regex', 226 | '[a-z]*$', 227 | 'abC', 228 | (), 229 | ) 230 | ref_err8 = ValidationError( 231 | ('a_list', 2), 232 | ('a_list', 'schema', 'oneof', 1, 'regex'), 233 | errors.REGEX_MISMATCH.code, 234 | 'regex', 235 | '[a-z]*$', 236 | 'abC', 237 | (), 238 | ) 239 | 240 | assert det['a_list'][0].errors[0] == ref_err5 241 | assert_has_all_errors(det['a_list'][2].errors, ref_err6, ref_err7, ref_err8) 242 | assert set['a_list']['schema']['oneof'].errors[0] == ref_err6 243 | assert 
set['a_list']['schema']['oneof'][0]['regex'].errors[0] == ref_err7 244 | assert set['a_list']['schema']['oneof'][1]['regex'].errors[0] == ref_err8 245 | assert set['a_list']['schema']['type'].errors[0] == ref_err5 246 | 247 | 248 | def test_path_resolution_for_registry_references(): 249 | class CustomValidator(Validator): 250 | def _normalize_coerce_custom(self, value): 251 | raise Exception("Failed coerce") 252 | 253 | validator = CustomValidator() 254 | validator.schema_registry.add( 255 | "schema1", {"child": {"type": "boolean", "coerce": "custom"}} 256 | ) 257 | validator.schema = {"parent": {"schema": "schema1"}} 258 | validator.validate({"parent": {"child": "["}}) 259 | 260 | expected = { 261 | 'parent': [ 262 | { 263 | 'child': [ 264 | "must be of boolean type", 265 | "field 'child' cannot be coerced: Failed coerce", 266 | ] 267 | } 268 | ] 269 | } 270 | assert validator.errors == expected 271 | 272 | 273 | def test_queries(): 274 | schema = {'foo': {'type': 'dict', 'schema': {'bar': {'type': 'number'}}}} 275 | document = {'foo': {'bar': 'zero'}} 276 | validator = Validator(schema) 277 | validator(document) 278 | 279 | assert 'foo' in validator.document_error_tree 280 | assert 'bar' in validator.document_error_tree['foo'] 281 | assert 'foo' in validator.schema_error_tree 282 | assert 'schema' in validator.schema_error_tree['foo'] 283 | 284 | assert errors.MAPPING_SCHEMA in validator.document_error_tree['foo'].errors 285 | assert errors.MAPPING_SCHEMA in validator.document_error_tree['foo'] 286 | assert errors.BAD_TYPE in validator.document_error_tree['foo']['bar'] 287 | assert errors.MAPPING_SCHEMA in validator.schema_error_tree['foo']['schema'] 288 | assert ( 289 | errors.BAD_TYPE in validator.schema_error_tree['foo']['schema']['bar']['type'] 290 | ) 291 | 292 | assert ( 293 | validator.document_error_tree['foo'][errors.MAPPING_SCHEMA].child_errors[0].code 294 | == errors.BAD_TYPE.code 295 | ) 296 | 297 | 298 | def test_basic_error_handler(): 299 | handler = errors.BasicErrorHandler() 300 | _errors, ref = [], {} 301 | 302 | _errors.append(ValidationError(['foo'], ['foo'], 0x63, 'readonly', True, None, ())) 303 | ref.update({'foo': [handler.messages[0x63]]}) 304 | assert handler(_errors) == ref 305 | 306 | _errors.append(ValidationError(['bar'], ['foo'], 0x42, 'min', 1, 2, ())) 307 | ref.update({'bar': [handler.messages[0x42].format(constraint=1)]}) 308 | assert handler(_errors) == ref 309 | 310 | _errors.append( 311 | ValidationError( 312 | ['zap', 'foo'], ['zap', 'schema', 'foo'], 0x24, 'type', 'string', True, () 313 | ) 314 | ) 315 | ref.update({'zap': [{'foo': [handler.messages[0x24].format(constraint='string')]}]}) 316 | assert handler(_errors) == ref 317 | 318 | _errors.append( 319 | ValidationError( 320 | ['zap', 'foo'], 321 | ['zap', 'schema', 'foo'], 322 | 0x41, 323 | 'regex', 324 | '^p[äe]ng$', 325 | 'boom', 326 | (), 327 | ) 328 | ) 329 | ref['zap'][0]['foo'].append(handler.messages[0x41].format(constraint='^p[äe]ng$')) 330 | assert handler(_errors) == ref 331 | 332 | 333 | def test_basic_error_of_errors(validator): 334 | schema = {'foo': {'oneof': [{'type': 'integer'}, {'type': 'string'}]}} 335 | document = {'foo': 23.42} 336 | error = ('foo', ('foo', 'oneof'), errors.ONEOF, schema['foo']['oneof'], ()) 337 | child_errors = [ 338 | (error[0], error[1] + (0, 'type'), errors.BAD_TYPE, 'integer'), 339 | (error[0], error[1] + (1, 'type'), errors.BAD_TYPE, 'string'), 340 | ] 341 | assert_fail( 342 | document, schema, validator=validator, error=error, child_errors=child_errors 
343 | ) 344 | assert validator.errors == { 345 | 'foo': [ 346 | errors.BasicErrorHandler.messages[0x92], 347 | { 348 | 'oneof definition 0': ['must be of integer type'], 349 | 'oneof definition 1': ['must be of string type'], 350 | }, 351 | ] 352 | } 353 | 354 | 355 | def test_wrong_amount_of_items(validator): 356 | # https://github.com/pyeve/cerberus/issues/505 357 | validator.schema = { 358 | 'test_list': { 359 | 'type': 'list', 360 | 'required': True, 361 | 'items': [{'type': 'string'}, {'type': 'string'}], 362 | } 363 | } 364 | validator({'test_list': ['test']}) 365 | assert validator.errors == {'test_list': ["length of list should be 2, it is 1"]} 366 | -------------------------------------------------------------------------------- /cerberus/tests/test_legacy.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | pass 4 | -------------------------------------------------------------------------------- /cerberus/tests/test_normalization.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from copy import deepcopy 4 | from tempfile import NamedTemporaryFile 5 | 6 | from pytest import mark 7 | 8 | from cerberus import Validator, errors 9 | from cerberus.tests import ( 10 | assert_fail, 11 | assert_has_error, 12 | assert_normalized, 13 | assert_success, 14 | ) 15 | 16 | 17 | def must_not_be_called(*args, **kwargs): 18 | raise RuntimeError('This shall not be called.') 19 | 20 | 21 | def test_coerce(): 22 | schema = {'amount': {'coerce': int}} 23 | document = {'amount': '1'} 24 | expected = {'amount': 1} 25 | assert_normalized(document, expected, schema) 26 | 27 | 28 | def test_coerce_in_dictschema(): 29 | schema = {'thing': {'type': 'dict', 'schema': {'amount': {'coerce': int}}}} 30 | document = {'thing': {'amount': '2'}} 31 | expected = {'thing': {'amount': 2}} 32 | assert_normalized(document, expected, schema) 33 | 34 | 35 | def test_coerce_in_listschema(): 36 | schema = {'things': {'type': 'list', 'schema': {'coerce': int}}} 37 | document = {'things': ['1', '2', '3']} 38 | expected = {'things': [1, 2, 3]} 39 | assert_normalized(document, expected, schema) 40 | 41 | 42 | def test_coerce_in_listitems(): 43 | schema = {'things': {'type': 'list', 'items': [{'coerce': int}, {'coerce': str}]}} 44 | document = {'things': ['1', 2]} 45 | expected = {'things': [1, '2']} 46 | assert_normalized(document, expected, schema) 47 | 48 | validator = Validator(schema) 49 | document['things'].append(3) 50 | assert not validator(document) 51 | assert validator.document['things'] == document['things'] 52 | 53 | 54 | def test_coerce_in_dictschema_in_listschema(): 55 | item_schema = {'type': 'dict', 'schema': {'amount': {'coerce': int}}} 56 | schema = {'things': {'type': 'list', 'schema': item_schema}} 57 | document = {'things': [{'amount': '2'}]} 58 | expected = {'things': [{'amount': 2}]} 59 | assert_normalized(document, expected, schema) 60 | 61 | 62 | def test_coerce_not_destructive(): 63 | schema = {'amount': {'coerce': int}} 64 | v = Validator(schema) 65 | doc = {'amount': '1'} 66 | v.validate(doc) 67 | assert v.document is not doc 68 | 69 | 70 | def test_coerce_catches_ValueError(): 71 | schema = {'amount': {'coerce': int}} 72 | _errors = assert_fail({'amount': 'not_a_number'}, schema) 73 | _errors[0].info = () # ignore exception message here 74 | assert_has_error( 75 | _errors, 'amount', ('amount', 'coerce'), errors.COERCION_FAILED, int 76 | ) 77 | 78 | 79 | def 
test_coerce_in_listitems_catches_ValueError(): 80 | schema = {'things': {'type': 'list', 'items': [{'coerce': int}, {'coerce': str}]}} 81 | document = {'things': ['not_a_number', 2]} 82 | _errors = assert_fail(document, schema) 83 | _errors[0].info = () # ignore exception message here 84 | assert_has_error( 85 | _errors, 86 | ('things', 0), 87 | ('things', 'items', 'coerce'), 88 | errors.COERCION_FAILED, 89 | int, 90 | ) 91 | 92 | 93 | def test_coerce_catches_TypeError(): 94 | schema = {'name': {'coerce': str.lower}} 95 | _errors = assert_fail({'name': 1234}, schema) 96 | _errors[0].info = () # ignore exception message here 97 | assert_has_error( 98 | _errors, 'name', ('name', 'coerce'), errors.COERCION_FAILED, str.lower 99 | ) 100 | 101 | 102 | def test_coerce_in_listitems_catches_TypeError(): 103 | schema = { 104 | 'things': {'type': 'list', 'items': [{'coerce': int}, {'coerce': str.lower}]} 105 | } 106 | document = {'things': ['1', 2]} 107 | _errors = assert_fail(document, schema) 108 | _errors[0].info = () # ignore exception message here 109 | assert_has_error( 110 | _errors, 111 | ('things', 1), 112 | ('things', 'items', 'coerce'), 113 | errors.COERCION_FAILED, 114 | str.lower, 115 | ) 116 | 117 | 118 | def test_coerce_unknown(): 119 | schema = {'foo': {'schema': {}, 'allow_unknown': {'coerce': int}}} 120 | document = {'foo': {'bar': '0'}} 121 | expected = {'foo': {'bar': 0}} 122 | assert_normalized(document, expected, schema) 123 | 124 | 125 | def test_custom_coerce_and_rename(): 126 | class MyNormalizer(Validator): 127 | def __init__(self, multiplier, *args, **kwargs): 128 | super(MyNormalizer, self).__init__(*args, **kwargs) 129 | self.multiplier = multiplier 130 | 131 | def _normalize_coerce_multiply(self, value): 132 | return value * self.multiplier 133 | 134 | v = MyNormalizer(2, {'foo': {'coerce': 'multiply'}}) 135 | assert v.normalized({'foo': 2})['foo'] == 4 136 | 137 | v = MyNormalizer(3, allow_unknown={'rename_handler': 'multiply'}) 138 | assert v.normalized({3: None}) == {9: None} 139 | 140 | 141 | def test_coerce_chain(): 142 | drop_prefix = lambda x: x[2:] # noqa: E731 143 | upper = lambda x: x.upper() # noqa: E731 144 | schema = {'foo': {'coerce': [hex, drop_prefix, upper]}} 145 | assert_normalized({'foo': 15}, {'foo': 'F'}, schema) 146 | 147 | 148 | def test_coerce_chain_aborts(validator): 149 | def dont_do_me(value): 150 | raise AssertionError('The coercion chain did not abort after an ' 'error.') 151 | 152 | schema = {'foo': {'coerce': [hex, dont_do_me]}} 153 | validator({'foo': '0'}, schema) 154 | assert errors.COERCION_FAILED in validator._errors 155 | 156 | 157 | def test_coerce_non_digit_in_sequence(validator): 158 | # https://github.com/pyeve/cerberus/issues/211 159 | schema = {'data': {'type': 'list', 'schema': {'type': 'integer', 'coerce': int}}} 160 | document = {'data': ['q']} 161 | assert validator.validated(document, schema) is None 162 | assert ( 163 | validator.validated(document, schema, always_return_document=True) == document 164 | ) # noqa: W503 165 | 166 | 167 | def test_nullables_dont_fail_coerce(): 168 | schema = {'foo': {'coerce': int, 'nullable': True, 'type': 'integer'}} 169 | document = {'foo': None} 170 | assert_normalized(document, document, schema) 171 | 172 | 173 | def test_nullables_fail_coerce_on_non_null_values(validator): 174 | def failing_coercion(value): 175 | raise Exception("expected to fail") 176 | 177 | schema = {'foo': {'coerce': failing_coercion, 'nullable': True, 'type': 'integer'}} 178 | document = {'foo': None} 179 | 
assert_normalized(document, document, schema) 180 | 181 | validator({'foo': 2}, schema) 182 | assert errors.COERCION_FAILED in validator._errors 183 | 184 | 185 | def test_normalized(): 186 | schema = {'amount': {'coerce': int}} 187 | document = {'amount': '2'} 188 | expected = {'amount': 2} 189 | assert_normalized(document, expected, schema) 190 | 191 | 192 | def test_rename(validator): 193 | schema = {'foo': {'rename': 'bar'}} 194 | document = {'foo': 0} 195 | expected = {'bar': 0} 196 | # We cannot use assertNormalized here since there is bug where 197 | # Cerberus says that the renamed field is an unknown field: 198 | # {'bar': 'unknown field'} 199 | validator(document, schema, False) 200 | assert validator.document == expected 201 | 202 | 203 | def test_rename_handler(): 204 | validator = Validator(allow_unknown={'rename_handler': int}) 205 | schema = {} 206 | document = {'0': 'foo'} 207 | expected = {0: 'foo'} 208 | assert_normalized(document, expected, schema, validator) 209 | 210 | 211 | def test_purge_unknown(): 212 | validator = Validator(purge_unknown=True) 213 | schema = {'foo': {'type': 'string'}} 214 | document = {'bar': 'foo'} 215 | expected = {} 216 | assert_normalized(document, expected, schema, validator) 217 | 218 | 219 | def test_purge_unknown_in_subschema(): 220 | schema = { 221 | 'foo': { 222 | 'type': 'dict', 223 | 'schema': {'foo': {'type': 'string'}}, 224 | 'purge_unknown': True, 225 | } 226 | } 227 | document = {'foo': {'bar': ''}} 228 | expected = {'foo': {}} 229 | assert_normalized(document, expected, schema) 230 | 231 | 232 | def test_issue_147_complex(): 233 | schema = {'revision': {'coerce': int}} 234 | document = {'revision': '5', 'file': NamedTemporaryFile(mode='w+')} 235 | document['file'].write(r'foobar') 236 | document['file'].seek(0) 237 | normalized = Validator(schema, allow_unknown=True).normalized(document) 238 | assert normalized['revision'] == 5 239 | assert normalized['file'].read() == 'foobar' 240 | document['file'].close() 241 | normalized['file'].close() 242 | 243 | 244 | def test_issue_147_nested_dict(): 245 | schema = {'thing': {'type': 'dict', 'schema': {'amount': {'coerce': int}}}} 246 | ref_obj = '2' 247 | document = {'thing': {'amount': ref_obj}} 248 | normalized = Validator(schema).normalized(document) 249 | assert document is not normalized 250 | assert normalized['thing']['amount'] == 2 251 | assert ref_obj == '2' 252 | assert document['thing']['amount'] is ref_obj 253 | 254 | 255 | def test_coerce_in_valuesrules(): 256 | # https://github.com/pyeve/cerberus/issues/155 257 | schema = { 258 | 'thing': {'type': 'dict', 'valuesrules': {'coerce': int, 'type': 'integer'}} 259 | } 260 | document = {'thing': {'amount': '2'}} 261 | expected = {'thing': {'amount': 2}} 262 | assert_normalized(document, expected, schema) 263 | 264 | 265 | def test_coerce_in_keysrules(): 266 | # https://github.com/pyeve/cerberus/issues/155 267 | schema = { 268 | 'thing': {'type': 'dict', 'keysrules': {'coerce': int, 'type': 'integer'}} 269 | } 270 | document = {'thing': {'5': 'foo'}} 271 | expected = {'thing': {5: 'foo'}} 272 | assert_normalized(document, expected, schema) 273 | 274 | 275 | def test_coercion_of_sequence_items(validator): 276 | # https://github.com/pyeve/cerberus/issues/161 277 | schema = {'a_list': {'type': 'list', 'schema': {'type': 'float', 'coerce': float}}} 278 | document = {'a_list': [3, 4, 5]} 279 | expected = {'a_list': [3.0, 4.0, 5.0]} 280 | assert_normalized(document, expected, schema, validator) 281 | for x in 
validator.document['a_list']: 282 | assert isinstance(x, float) 283 | 284 | 285 | @mark.parametrize( 286 | 'default', ({'default': 'bar_value'}, {'default_setter': lambda doc: 'bar_value'}) 287 | ) 288 | def test_default_missing(default): 289 | bar_schema = {'type': 'string'} 290 | bar_schema.update(default) 291 | schema = {'foo': {'type': 'string'}, 'bar': bar_schema} 292 | document = {'foo': 'foo_value'} 293 | expected = {'foo': 'foo_value', 'bar': 'bar_value'} 294 | assert_normalized(document, expected, schema) 295 | 296 | 297 | @mark.parametrize( 298 | 'default', ({'default': 'bar_value'}, {'default_setter': must_not_be_called}) 299 | ) 300 | def test_default_existent(default): 301 | bar_schema = {'type': 'string'} 302 | bar_schema.update(default) 303 | schema = {'foo': {'type': 'string'}, 'bar': bar_schema} 304 | document = {'foo': 'foo_value', 'bar': 'non_default'} 305 | assert_normalized(document, document.copy(), schema) 306 | 307 | 308 | @mark.parametrize( 309 | 'default', ({'default': 'bar_value'}, {'default_setter': must_not_be_called}) 310 | ) 311 | def test_default_none_nullable(default): 312 | bar_schema = {'type': 'string', 'nullable': True} 313 | bar_schema.update(default) 314 | schema = {'foo': {'type': 'string'}, 'bar': bar_schema} 315 | document = {'foo': 'foo_value', 'bar': None} 316 | assert_normalized(document, document.copy(), schema) 317 | 318 | 319 | @mark.parametrize( 320 | 'default', ({'default': 'bar_value'}, {'default_setter': lambda doc: 'bar_value'}) 321 | ) 322 | def test_default_none_nonnullable(default): 323 | bar_schema = {'type': 'string', 'nullable': False} 324 | bar_schema.update(default) 325 | schema = {'foo': {'type': 'string'}, 'bar': bar_schema} 326 | document = {'foo': 'foo_value', 'bar': None} 327 | expected = {'foo': 'foo_value', 'bar': 'bar_value'} 328 | assert_normalized(document, expected, schema) 329 | 330 | 331 | def test_default_none_default_value(): 332 | schema = { 333 | 'foo': {'type': 'string'}, 334 | 'bar': {'type': 'string', 'nullable': True, 'default': None}, 335 | } 336 | document = {'foo': 'foo_value'} 337 | expected = {'foo': 'foo_value', 'bar': None} 338 | assert_normalized(document, expected, schema) 339 | 340 | 341 | @mark.parametrize( 342 | 'default', ({'default': 'bar_value'}, {'default_setter': lambda doc: 'bar_value'}) 343 | ) 344 | def test_default_missing_in_subschema(default): 345 | bar_schema = {'type': 'string'} 346 | bar_schema.update(default) 347 | schema = { 348 | 'thing': { 349 | 'type': 'dict', 350 | 'schema': {'foo': {'type': 'string'}, 'bar': bar_schema}, 351 | } 352 | } 353 | document = {'thing': {'foo': 'foo_value'}} 354 | expected = {'thing': {'foo': 'foo_value', 'bar': 'bar_value'}} 355 | assert_normalized(document, expected, schema) 356 | 357 | 358 | def test_depending_default_setters(): 359 | schema = { 360 | 'a': {'type': 'integer'}, 361 | 'b': {'type': 'integer', 'default_setter': lambda d: d['a'] + 1}, 362 | 'c': {'type': 'integer', 'default_setter': lambda d: d['b'] * 2}, 363 | 'd': {'type': 'integer', 'default_setter': lambda d: d['b'] + d['c']}, 364 | } 365 | document = {'a': 1} 366 | expected = {'a': 1, 'b': 2, 'c': 4, 'd': 6} 367 | assert_normalized(document, expected, schema) 368 | 369 | 370 | def test_circular_depending_default_setters(validator): 371 | schema = { 372 | 'a': {'type': 'integer', 'default_setter': lambda d: d['b'] + 1}, 373 | 'b': {'type': 'integer', 'default_setter': lambda d: d['a'] + 1}, 374 | } 375 | validator({}, schema) 376 | assert errors.SETTING_DEFAULT_FAILED in 
validator._errors 377 | 378 | 379 | def test_issue_250(): 380 | # https://github.com/pyeve/cerberus/issues/250 381 | schema = { 382 | 'list': { 383 | 'type': 'list', 384 | 'schema': { 385 | 'type': 'dict', 386 | 'allow_unknown': True, 387 | 'schema': {'a': {'type': 'string'}}, 388 | }, 389 | } 390 | } 391 | document = {'list': {'is_a': 'mapping'}} 392 | assert_fail( 393 | document, 394 | schema, 395 | error=('list', ('list', 'type'), errors.BAD_TYPE, schema['list']['type']), 396 | ) 397 | 398 | 399 | def test_issue_250_no_type_pass_on_list(): 400 | # https://github.com/pyeve/cerberus/issues/250 401 | schema = { 402 | 'list': { 403 | 'schema': { 404 | 'allow_unknown': True, 405 | 'type': 'dict', 406 | 'schema': {'a': {'type': 'string'}}, 407 | } 408 | } 409 | } 410 | document = {'list': [{'a': 'known', 'b': 'unknown'}]} 411 | assert_normalized(document, document, schema) 412 | 413 | 414 | def test_issue_250_no_type_fail_on_dict(): 415 | # https://github.com/pyeve/cerberus/issues/250 416 | schema = { 417 | 'list': {'schema': {'allow_unknown': True, 'schema': {'a': {'type': 'string'}}}} 418 | } 419 | document = {'list': {'a': {'a': 'known'}}} 420 | assert_fail( 421 | document, 422 | schema, 423 | error=( 424 | 'list', 425 | ('list', 'schema'), 426 | errors.BAD_TYPE_FOR_SCHEMA, 427 | schema['list']['schema'], 428 | ), 429 | ) 430 | 431 | 432 | def test_issue_250_no_type_fail_pass_on_other(): 433 | # https://github.com/pyeve/cerberus/issues/250 434 | schema = { 435 | 'list': {'schema': {'allow_unknown': True, 'schema': {'a': {'type': 'string'}}}} 436 | } 437 | document = {'list': 1} 438 | assert_normalized(document, document, schema) 439 | 440 | 441 | def test_allow_unknown_with_of_rules(): 442 | # https://github.com/pyeve/cerberus/issues/251 443 | schema = { 444 | 'test': { 445 | 'oneof': [ 446 | { 447 | 'type': 'dict', 448 | 'allow_unknown': True, 449 | 'schema': {'known': {'type': 'string'}}, 450 | }, 451 | {'type': 'dict', 'schema': {'known': {'type': 'string'}}}, 452 | ] 453 | } 454 | } 455 | # check regression and that allow unknown does not cause any different 456 | # than expected behaviour for one-of. 
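    # (a fully known document satisfies both alternatives, so 'oneof' itself
    # must report a failure)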
457 | document = {'test': {'known': 's'}} 458 | assert_fail( 459 | document, 460 | schema, 461 | error=('test', ('test', 'oneof'), errors.ONEOF, schema['test']['oneof']), 462 | ) 463 | # check that allow_unknown is actually applied 464 | document = {'test': {'known': 's', 'unknown': 'asd'}} 465 | assert_success(document, schema) 466 | 467 | 468 | def test_271_normalising_tuples(): 469 | # https://github.com/pyeve/cerberus/issues/271 470 | schema = { 471 | 'my_field': {'type': 'list', 'schema': {'type': ('string', 'number', 'dict')}} 472 | } 473 | document = {'my_field': ('foo', 'bar', 42, 'albert', 'kandinsky', {'items': 23})} 474 | assert_success(document, schema) 475 | 476 | normalized = Validator(schema).normalized(document) 477 | assert normalized['my_field'] == ( 478 | 'foo', 479 | 'bar', 480 | 42, 481 | 'albert', 482 | 'kandinsky', 483 | {'items': 23}, 484 | ) 485 | 486 | 487 | def test_allow_unknown_wo_schema(): 488 | # https://github.com/pyeve/cerberus/issues/302 489 | v = Validator({'a': {'type': 'dict', 'allow_unknown': True}}) 490 | v({'a': {}}) 491 | 492 | 493 | def test_allow_unknown_with_purge_unknown(): 494 | validator = Validator(purge_unknown=True) 495 | schema = {'foo': {'type': 'dict', 'allow_unknown': True}} 496 | document = {'foo': {'bar': True}, 'bar': 'foo'} 497 | expected = {'foo': {'bar': True}} 498 | assert_normalized(document, expected, schema, validator) 499 | 500 | 501 | def test_allow_unknown_with_purge_unknown_subdocument(): 502 | validator = Validator(purge_unknown=True) 503 | schema = { 504 | 'foo': { 505 | 'type': 'dict', 506 | 'schema': {'bar': {'type': 'string'}}, 507 | 'allow_unknown': True, 508 | } 509 | } 510 | document = {'foo': {'bar': 'baz', 'corge': False}, 'thud': 'xyzzy'} 511 | expected = {'foo': {'bar': 'baz', 'corge': False}} 512 | assert_normalized(document, expected, schema, validator) 513 | 514 | 515 | def test_purge_readonly(): 516 | schema = { 517 | 'description': {'type': 'string', 'maxlength': 500}, 518 | 'last_updated': {'readonly': True}, 519 | } 520 | validator = Validator(schema=schema, purge_readonly=True) 521 | document = {'description': 'it is a thing'} 522 | expected = deepcopy(document) 523 | document['last_updated'] = 'future' 524 | assert_normalized(document, expected, validator=validator) 525 | 526 | 527 | def test_defaults_in_allow_unknown_schema(): 528 | schema = {'meta': {'type': 'dict'}, 'version': {'type': 'string'}} 529 | allow_unknown = { 530 | 'type': 'dict', 531 | 'schema': { 532 | 'cfg_path': {'type': 'string', 'default': 'cfg.yaml'}, 533 | 'package': {'type': 'string'}, 534 | }, 535 | } 536 | validator = Validator(schema=schema, allow_unknown=allow_unknown) 537 | 538 | document = {'version': '1.2.3', 'plugin_foo': {'package': 'foo'}} 539 | expected = { 540 | 'version': '1.2.3', 541 | 'plugin_foo': {'package': 'foo', 'cfg_path': 'cfg.yaml'}, 542 | } 543 | assert_normalized(document, expected, schema, validator) 544 | -------------------------------------------------------------------------------- /cerberus/tests/test_registries.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from cerberus import schema_registry, rules_set_registry, Validator 4 | from cerberus.tests import ( 5 | assert_fail, 6 | assert_normalized, 7 | assert_schema_error, 8 | assert_success, 9 | ) 10 | 11 | 12 | def test_schema_registry_simple(): 13 | schema_registry.add('foo', {'bar': {'type': 'string'}}) 14 | schema = {'a': {'schema': 'foo'}, 'b': {'schema': 'foo'}} 15 | 
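    # both fields resolve the same 'foo' definition from the schema registry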
document = {'a': {'bar': 'a'}, 'b': {'bar': 'b'}} 16 | assert_success(document, schema) 17 | 18 | 19 | def test_top_level_reference(): 20 | schema_registry.add('peng', {'foo': {'type': 'integer'}}) 21 | document = {'foo': 42} 22 | assert_success(document, 'peng') 23 | 24 | 25 | def test_rules_set_simple(): 26 | rules_set_registry.add('foo', {'type': 'integer'}) 27 | assert_success({'bar': 1}, {'bar': 'foo'}) 28 | assert_fail({'bar': 'one'}, {'bar': 'foo'}) 29 | 30 | 31 | def test_allow_unknown_as_reference(): 32 | rules_set_registry.add('foo', {'type': 'number'}) 33 | v = Validator(allow_unknown='foo') 34 | assert_success({0: 1}, {}, v) 35 | assert_fail({0: 'one'}, {}, v) 36 | 37 | 38 | def test_recursion(): 39 | rules_set_registry.add('self', {'type': 'dict', 'allow_unknown': 'self'}) 40 | v = Validator(allow_unknown='self') 41 | assert_success({0: {1: {2: {}}}}, {}, v) 42 | 43 | 44 | def test_references_remain_unresolved(validator): 45 | rules_set_registry.extend( 46 | (('boolean', {'type': 'boolean'}), ('booleans', {'valuesrules': 'boolean'})) 47 | ) 48 | validator.schema = {'foo': 'booleans'} 49 | assert 'booleans' == validator.schema['foo'] 50 | assert 'boolean' == rules_set_registry._storage['booleans']['valuesrules'] 51 | 52 | 53 | def test_rules_registry_with_anyof_type(): 54 | rules_set_registry.add('string_or_integer', {'anyof_type': ['string', 'integer']}) 55 | schema = {'soi': 'string_or_integer'} 56 | assert_success({'soi': 'hello'}, schema) 57 | 58 | 59 | def test_schema_registry_with_anyof_type(): 60 | schema_registry.add('soi_id', {'id': {'anyof_type': ['string', 'integer']}}) 61 | schema = {'soi': {'schema': 'soi_id'}} 62 | assert_success({'soi': {'id': 'hello'}}, schema) 63 | 64 | 65 | def test_normalization_with_rules_set(): 66 | # https://github.com/pyeve/cerberus/issues/283 67 | rules_set_registry.add('foo', {'default': 42}) 68 | assert_normalized({}, {'bar': 42}, {'bar': 'foo'}) 69 | rules_set_registry.add('foo', {'default_setter': lambda _: 42}) 70 | assert_normalized({}, {'bar': 42}, {'bar': 'foo'}) 71 | rules_set_registry.add('foo', {'type': 'integer', 'nullable': True}) 72 | assert_success({'bar': None}, {'bar': 'foo'}) 73 | 74 | 75 | def test_rules_set_with_dict_field(): 76 | document = {'a_dict': {'foo': 1}} 77 | schema = {'a_dict': {'type': 'dict', 'schema': {'foo': 'rule'}}} 78 | 79 | # the schema's not yet added to the valid ones, so test the faulty first 80 | rules_set_registry.add('rule', {'tüpe': 'integer'}) 81 | assert_schema_error(document, schema) 82 | 83 | rules_set_registry.add('rule', {'type': 'integer'}) 84 | assert_success(document, schema) 85 | -------------------------------------------------------------------------------- /cerberus/tests/test_schema.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import re 4 | 5 | import pytest 6 | 7 | from cerberus import Validator, errors, SchemaError 8 | from cerberus.schema import UnvalidatedSchema 9 | from cerberus.tests import assert_schema_error 10 | 11 | 12 | def test_empty_schema(): 13 | validator = Validator() 14 | with pytest.raises(SchemaError, match=errors.SCHEMA_ERROR_MISSING): 15 | validator({}, schema=None) 16 | 17 | 18 | def test_bad_schema_type(validator): 19 | schema = "this string should really be dict" 20 | msg = errors.SCHEMA_ERROR_DEFINITION_TYPE.format(schema) 21 | with pytest.raises(SchemaError, match=msg): 22 | validator.schema = schema 23 | 24 | 25 | def test_bad_schema_type_field(validator): 26 | field = 'foo' 27 
| schema = {field: {'schema': {'bar': {'type': 'strong'}}}} 28 | with pytest.raises(SchemaError): 29 | validator.schema = schema 30 | 31 | 32 | def test_unknown_rule(validator): 33 | msg = "{'foo': [{'unknown': ['unknown rule']}]}" 34 | with pytest.raises(SchemaError, match=re.escape(msg)): 35 | validator.schema = {'foo': {'unknown': 'rule'}} 36 | 37 | 38 | def test_unknown_type(validator): 39 | msg = str({'foo': [{'type': ['Unsupported types: unknown']}]}) 40 | with pytest.raises(SchemaError, match=re.escape(msg)): 41 | validator.schema = {'foo': {'type': 'unknown'}} 42 | 43 | 44 | def test_bad_schema_definition(validator): 45 | field = 'name' 46 | msg = str({field: ['must be of dict type']}) 47 | with pytest.raises(SchemaError, match=re.escape(msg)): 48 | validator.schema = {field: 'this should really be a dict'} 49 | 50 | 51 | def test_bad_of_rules(): 52 | schema = {'foo': {'anyof': {'type': 'string'}}} 53 | assert_schema_error({}, schema) 54 | 55 | 56 | def test_normalization_rules_are_invalid_in_of_rules(): 57 | schema = {0: {'anyof': [{'coerce': lambda x: x}]}} 58 | assert_schema_error({}, schema) 59 | 60 | 61 | def test_anyof_allof_schema_validate(): 62 | # make sure schema with 'anyof' and 'allof' constraints are checked 63 | # correctly 64 | schema = { 65 | 'doc': {'type': 'dict', 'anyof': [{'schema': [{'param': {'type': 'number'}}]}]} 66 | } 67 | assert_schema_error({'doc': 'this is my document'}, schema) 68 | 69 | schema = { 70 | 'doc': {'type': 'dict', 'allof': [{'schema': [{'param': {'type': 'number'}}]}]} 71 | } 72 | assert_schema_error({'doc': 'this is my document'}, schema) 73 | 74 | 75 | def test_repr(): 76 | v = Validator({'foo': {'type': 'string'}}) 77 | assert repr(v.schema) == "{'foo': {'type': 'string'}}" 78 | 79 | 80 | def test_validated_schema_cache(): 81 | v = Validator({'foozifix': {'coerce': int}}) 82 | cache_size = len(v._valid_schemas) 83 | 84 | v = Validator({'foozifix': {'type': 'integer'}}) 85 | cache_size += 1 86 | assert len(v._valid_schemas) == cache_size 87 | 88 | v = Validator({'foozifix': {'coerce': int}}) 89 | assert len(v._valid_schemas) == cache_size 90 | 91 | max_cache_size = 163 92 | assert cache_size <= max_cache_size, ( 93 | "There's an unexpected high amount (%s) of cached valid " 94 | "definition schemas. Unless you added further tests, " 95 | "there are good chances that something is wrong. " 96 | "If you added tests with new schemas, you can try to " 97 | "adjust the variable `max_cache_size` according to " 98 | "the added schemas." 
% cache_size 99 | ) 100 | 101 | 102 | def test_expansion_in_nested_schema(): 103 | schema = {'detroit': {'schema': {'anyof_regex': ['^Aladdin', 'Sane$']}}} 104 | v = Validator(schema) 105 | assert v.schema['detroit']['schema'] == { 106 | 'anyof': [{'regex': '^Aladdin'}, {'regex': 'Sane$'}] 107 | } 108 | 109 | 110 | def test_unvalidated_schema_can_be_copied(): 111 | schema = UnvalidatedSchema() 112 | schema_copy = schema.copy() 113 | assert schema_copy == schema 114 | 115 | 116 | # TODO remove with next major release 117 | def test_deprecated_rule_names_in_valueschema(): 118 | def check_with(field, value, error): 119 | pass 120 | 121 | schema = { 122 | "field_1": { 123 | "type": "dict", 124 | "valueschema": { 125 | "type": "dict", 126 | "keyschema": {"type": "string"}, 127 | "valueschema": {"type": "string"}, 128 | }, 129 | }, 130 | "field_2": { 131 | "type": "list", 132 | "items": [ 133 | {"keyschema": {}}, 134 | {"validator": check_with}, 135 | {"valueschema": {}}, 136 | ], 137 | }, 138 | } 139 | 140 | with pytest.deprecated_call(): 141 | validator = Validator(schema) 142 | 143 | assert validator.schema == { 144 | "field_1": { 145 | "type": "dict", 146 | "valuesrules": { 147 | "type": "dict", 148 | "keysrules": {"type": "string"}, 149 | "valuesrules": {"type": "string"}, 150 | }, 151 | }, 152 | "field_2": { 153 | "type": "list", 154 | "items": [ 155 | {"keysrules": {}}, 156 | {"check_with": check_with}, 157 | {"valuesrules": {}}, 158 | ], 159 | }, 160 | } 161 | 162 | 163 | def test_anyof_check_with(): 164 | def foo(field, value, error): 165 | pass 166 | 167 | def bar(field, value, error): 168 | pass 169 | 170 | schema = {'field': {'anyof_check_with': [foo, bar]}} 171 | validator = Validator(schema) 172 | 173 | assert validator.schema == { 174 | 'field': {'anyof': [{'check_with': foo}, {'check_with': bar}]} 175 | } 176 | 177 | 178 | def test_rulename_space_is_normalized(): 179 | Validator(schema={"field": {"default setter": lambda x: x, "type": "string"}}) 180 | -------------------------------------------------------------------------------- /cerberus/tests/test_utils.py: -------------------------------------------------------------------------------- 1 | from cerberus.utils import compare_paths_lt 2 | 3 | 4 | def test_compare_paths(): 5 | lesser = ('a_dict', 'keysrules') 6 | greater = ('a_dict', 'valuesrules') 7 | assert compare_paths_lt(lesser, greater) 8 | 9 | lesser += ('type',) 10 | greater += ('regex',) 11 | assert compare_paths_lt(lesser, greater) 12 | -------------------------------------------------------------------------------- /cerberus/utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from collections import namedtuple 4 | 5 | from cerberus.platform import _int_types, _str_type, Mapping, Sequence, Set 6 | 7 | 8 | TypeDefinition = namedtuple('TypeDefinition', 'name,included_types,excluded_types') 9 | """ 10 | This class is used to define types that can be used as value in the 11 | :attr:`~cerberus.Validator.types_mapping` property. 12 | The ``name`` should be descriptive and match the key it is going to be assigned 13 | to. 14 | A value that is validated against such definition must be an instance of any of 15 | the types contained in ``included_types`` and must not match any of the types 16 | contained in ``excluded_types``. 
17 | """ 18 | 19 | 20 | def compare_paths_lt(x, y): 21 | min_length = min(len(x), len(y)) 22 | 23 | if x[:min_length] == y[:min_length]: 24 | return len(x) == min_length 25 | 26 | for i in range(min_length): 27 | a, b = x[i], y[i] 28 | 29 | for _type in (_int_types, _str_type, tuple): 30 | if isinstance(a, _type): 31 | if isinstance(b, _type): 32 | break 33 | else: 34 | return True 35 | 36 | if a == b: 37 | continue 38 | elif a < b: 39 | return True 40 | else: 41 | return False 42 | 43 | raise RuntimeError 44 | 45 | 46 | def drop_item_from_tuple(t, i): 47 | return t[:i] + t[i + 1 :] 48 | 49 | 50 | def get_Validator_class(): 51 | global Validator 52 | if 'Validator' not in globals(): 53 | from cerberus.validator import Validator 54 | return Validator 55 | 56 | 57 | def mapping_hash(schema): 58 | return hash(mapping_to_frozenset(schema)) 59 | 60 | 61 | def mapping_to_frozenset(mapping): 62 | """ 63 | Be aware that this treats any sequence type with the equal members as equal. As it 64 | is used to identify equality of schemas, this can be considered okay as definitions 65 | are semantically equal regardless the container type. 66 | """ 67 | 68 | aggregation = {} 69 | 70 | for key, value in mapping.items(): 71 | if isinstance(value, Mapping): 72 | aggregation[key] = mapping_to_frozenset(value) 73 | elif isinstance(value, Sequence): 74 | value = list(value) 75 | for i, item in enumerate(value): 76 | if isinstance(item, Mapping): 77 | value[i] = mapping_to_frozenset(item) 78 | aggregation[key] = tuple(value) 79 | elif isinstance(value, Set): 80 | aggregation[key] = frozenset(value) 81 | else: 82 | aggregation[key] = value 83 | 84 | return frozenset(aggregation.items()) 85 | 86 | 87 | def quote_string(value): 88 | if isinstance(value, _str_type): 89 | return '"%s"' % value 90 | else: 91 | return value 92 | 93 | 94 | class readonly_classproperty(property): 95 | def __get__(self, instance, owner): 96 | return super(readonly_classproperty, self).__get__(owner) 97 | 98 | def __set__(self, instance, value): 99 | raise RuntimeError('This is a readonly class property.') 100 | 101 | def __delete__(self, instance): 102 | raise RuntimeError('This is a readonly class property.') 103 | 104 | 105 | def validator_factory(name, bases=None, namespace={}): 106 | """ 107 | Dynamically create a :class:`~cerberus.Validator` subclass. 108 | Docstrings of mixin-classes will be added to the resulting class' one if ``__doc__`` 109 | is not in :obj:`namespace`. 110 | 111 | :param name: The name of the new class. 112 | :type name: :class:`str` 113 | :param bases: Class(es) with additional and overriding attributes. 114 | :type bases: :class:`tuple` of or a single :term:`class` 115 | :param namespace: Attributes for the new class. 116 | :type namespace: :class:`dict` 117 | :return: The created class. 
118 | """ 119 | Validator = get_Validator_class() 120 | 121 | if bases is None: 122 | bases = (Validator,) 123 | elif isinstance(bases, tuple): 124 | bases += (Validator,) 125 | else: 126 | bases = (bases, Validator) 127 | 128 | docstrings = [x.__doc__ for x in bases if x.__doc__] 129 | if len(docstrings) > 1 and '__doc__' not in namespace: 130 | namespace.update({'__doc__': '\n'.join(docstrings)}) 131 | 132 | return type(name, bases, namespace) 133 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # Internal variables. 11 | PAPEROPT_a4 = -D latex_paper_size=a4 12 | PAPEROPT_letter = -D latex_paper_size=letter 13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 14 | # the i18n builder cannot share the environment and doctrees with the others 15 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 16 | 17 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext 18 | 19 | help: 20 | @echo "Please use \`make ' where is one of" 21 | @echo " html to make standalone HTML files" 22 | @echo " dirhtml to make HTML files named index.html in directories" 23 | @echo " singlehtml to make a single large HTML file" 24 | @echo " pickle to make pickle files" 25 | @echo " json to make JSON files" 26 | @echo " htmlhelp to make HTML files and a HTML help project" 27 | @echo " qthelp to make HTML files and a qthelp project" 28 | @echo " devhelp to make HTML files and a Devhelp project" 29 | @echo " epub to make an epub" 30 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 31 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 32 | @echo " text to make text files" 33 | @echo " man to make manual pages" 34 | @echo " texinfo to make Texinfo files" 35 | @echo " info to make Texinfo files and run them through makeinfo" 36 | @echo " gettext to make PO message catalogs" 37 | @echo " changes to make an overview of all changed/added/deprecated items" 38 | @echo " linkcheck to check all external links for integrity" 39 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 40 | 41 | clean: 42 | -rm -rf $(BUILDDIR)/* 43 | -rm -rf $(INCLUDESDIR)/*.rst 44 | 45 | html: 46 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 47 | @echo 48 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 49 | 50 | dirhtml: 51 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 52 | @echo 53 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 54 | 55 | singlehtml: 56 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 57 | @echo 58 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 59 | 60 | pickle: 61 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 62 | @echo 63 | @echo "Build finished; now you can process the pickle files." 64 | 65 | json: 66 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 67 | @echo 68 | @echo "Build finished; now you can process the JSON files." 
69 | 70 | htmlhelp: 71 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 72 | @echo 73 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 74 | ".hhp project file in $(BUILDDIR)/htmlhelp." 75 | 76 | qthelp: 77 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 78 | @echo 79 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 80 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 81 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Cerberus.qhcp" 82 | @echo "To view the help file:" 83 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Cerberus.qhc" 84 | 85 | devhelp: 86 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 87 | @echo 88 | @echo "Build finished." 89 | @echo "To view the help file:" 90 | @echo "# mkdir -p $$HOME/.local/share/devhelp/Cerberus" 91 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Cerberus" 92 | @echo "# devhelp" 93 | 94 | epub: 95 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 96 | @echo 97 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 98 | 99 | latex: 100 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 101 | @echo 102 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 103 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 104 | "(use \`make latexpdf' here to do that automatically)." 105 | 106 | latexpdf: 107 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 108 | @echo "Running LaTeX files through pdflatex..." 109 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 110 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 111 | 112 | text: 113 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 114 | @echo 115 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 116 | 117 | man: 118 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 119 | @echo 120 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 121 | 122 | texinfo: 123 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 124 | @echo 125 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 126 | @echo "Run \`make' in that directory to run these through makeinfo" \ 127 | "(use \`make info' here to do that automatically)." 128 | 129 | info: 130 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 131 | @echo "Running Texinfo files through makeinfo..." 132 | make -C $(BUILDDIR)/texinfo info 133 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 134 | 135 | gettext: 136 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 137 | @echo 138 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 139 | 140 | changes: 141 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 142 | @echo 143 | @echo "The overview file is in $(BUILDDIR)/changes." 144 | 145 | linkcheck: 146 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 147 | @echo 148 | @echo "Link check complete; look for any errors in the above output " \ 149 | "or in $(BUILDDIR)/linkcheck/output.txt." 150 | 151 | doctest: 152 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 153 | @echo "Testing of doctests in the sources finished, look at the " \ 154 | "results in $(BUILDDIR)/doctest/output.txt." 
155 | -------------------------------------------------------------------------------- /docs/_static/cerberus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyeve/cerberus/c07c2f942873bd90d333347cb679850a85680aa6/docs/_static/cerberus.png -------------------------------------------------------------------------------- /docs/_static/style.css: -------------------------------------------------------------------------------- 1 | @media (prefers-color-scheme: dark) {body:not([data-theme="light"]) .sidebar-logo {filter: invert(1);}} 2 | .sidebar-logo {max-height: 8em;} 3 | -------------------------------------------------------------------------------- /docs/api.rst: -------------------------------------------------------------------------------- 1 | API Documentation 2 | ================= 3 | 4 | Validator Class 5 | --------------- 6 | 7 | .. autoclass:: cerberus.Validator 8 | :members: allow_unknown, clear_caches, document, document_error_tree, 9 | document_path, _drop_remaining_rules, _error, error_handler, 10 | _errors, errors, _get_child_validator, ignore_none_values, 11 | is_child, _lookup_field, mandatory_validations, normalized, 12 | priority_validations, purge_unknown, recent_error, 13 | require_all, _remaining_rules, root_allow_unknown, root_document, 14 | root_require_all, root_schema, rules_set_registry, schema, 15 | schema_error_tree, schema_path, schema_registry, types, 16 | types_mapping, _valid_schemas, validate, validated 17 | 18 | 19 | Rules Set & Schema Registry 20 | --------------------------- 21 | 22 | .. autoclass:: cerberus.schema.Registry 23 | :members: 24 | 25 | 26 | Type Definitions 27 | ---------------- 28 | 29 | .. autoclass:: cerberus.TypeDefinition 30 | 31 | 32 | Error Handlers 33 | -------------- 34 | 35 | .. autoclass:: cerberus.errors.BaseErrorHandler 36 | :members: 37 | :private-members: 38 | :special-members: 39 | 40 | .. autoclass:: cerberus.errors.BasicErrorHandler 41 | 42 | 43 | Python Error Representations 44 | ---------------------------- 45 | 46 | .. autoclass:: cerberus.errors.ErrorDefinition 47 | 48 | .. autoclass:: cerberus.errors.ValidationError 49 | :members: 50 | 51 | .. _error-codes: 52 | 53 | Error Codes 54 | ~~~~~~~~~~~ 55 | 56 | Its :attr:`code` attribute uniquely identifies an 57 | :class:`~cerberus.errors.ErrorDefinition` that is used as a concrete error's 58 | :attr:`~cerberus.errors.ValidationError.code`. 59 | Some codes are actually reserved to mark a shared property of different errors. 60 | These are useful as bitmasks while processing errors. This is the list of the 61 | reserved codes: 62 | 63 | ============= ======== === =================================================== 64 | ``0110 0000`` ``0x60`` 96 An error that occurred during normalization. 65 | ``1000 0000`` ``0x80`` 128 An error that contains child errors. 66 | ``1001 0000`` ``0x90`` 144 An error that was emitted by one of the \*of-rules. 67 | ============= ======== === =================================================== 68 | 69 | None of these bits in the upper nibble must be used to enumerate error 70 | definitions, but only to mark one with the associated property. 71 | 72 | .. important:: 73 | 74 | Users are advised to set bit 8 for self-defined errors. So the code 75 | ``0001 0000 0001`` / ``0x101`` would be the first in a domain-specific set of 76 | error definitions. 77 | 78 | 79 | This is a list of all error definitions that are shipped with the 80 | :mod:`~cerberus.errors` module: 81 | 82 | .. 
include:: includes/error-codes.rst 83 | 84 | Error Containers 85 | ~~~~~~~~~~~~~~~~ 86 | 87 | .. autoclass:: cerberus.errors.ErrorList 88 | 89 | .. autoclass:: cerberus.errors.ErrorTree 90 | :members: 91 | 92 | .. autoclass:: cerberus.errors.DocumentErrorTree 93 | 94 | .. autoclass:: cerberus.errors.SchemaErrorTree 95 | 96 | 97 | Exceptions 98 | ---------- 99 | 100 | .. autoexception:: cerberus.SchemaError 101 | 102 | .. autoexception:: cerberus.DocumentError 103 | 104 | 105 | Utilities 106 | --------- 107 | 108 | .. automodule:: cerberus.utils 109 | :members: 110 | 111 | 112 | .. _schema-validation-schema: 113 | 114 | Schema Validation Schema 115 | ------------------------ 116 | 117 | Against this schema validation schemas given to a vanilla 118 | :class:`~cerberus.Validator` will be validated: 119 | 120 | .. include:: includes/schema-validation-schema.rst 121 | -------------------------------------------------------------------------------- /docs/authors.rst: -------------------------------------------------------------------------------- 1 | Authors 2 | ======= 3 | .. include:: ../AUTHORS 4 | -------------------------------------------------------------------------------- /docs/changelog.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../CHANGES.rst 2 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Cerberus documentation build configuration file, created by 4 | # sphinx-quickstart on Thu Oct 11 15:52:25 2012. 5 | # 6 | # This file is execfile()d with the current directory set to its containing dir. 7 | # 8 | # Note that not all possible configuration values are present in this 9 | # autogenerated file. 10 | # 11 | # All configuration values have a default; values that are commented out 12 | # serve to show the default. 13 | 14 | import sys 15 | import importlib 16 | from pathlib import Path 17 | 18 | if sys.version_info < (3, 6): 19 | raise RuntimeError( 20 | 'Requires Python 3.6 or later, running on %s atm.' % '.'.join(sys.version_info) 21 | ) 22 | 23 | 24 | module_spec = importlib.util.spec_from_file_location( 25 | 'generate_includes', Path(__file__).parent / 'includes' / 'generate.py' 26 | ) 27 | _module = importlib.util.module_from_spec(module_spec) 28 | module_spec.loader.exec_module(_module) 29 | 30 | 31 | # -- General configuration ----------------------------------------------------- 32 | 33 | # If your documentation needs a minimal Sphinx version, state it here. 34 | # needs_sphinx = '1.0' 35 | 36 | # Add any Sphinx extension module names here, as strings. They can be extensions 37 | # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 38 | extensions = [ 39 | 'alabaster', 40 | 'sphinx.ext.autodoc', 41 | 'sphinx.ext.doctest', 42 | 'sphinx.ext.extlinks', 43 | 'sphinx.ext.intersphinx', 44 | ] 45 | 46 | # Add any paths that contain templates here, relative to this directory. 47 | templates_path = ['_templates'] 48 | 49 | # The suffix of source filenames. 50 | source_suffix = '.rst' 51 | 52 | # The encoding of source files. 53 | # source_encoding = 'utf-8-sig' 54 | 55 | # The master toctree document. 56 | master_doc = 'index' 57 | 58 | # General information about the project. 
59 | project = u'Cerberus' 60 | copyright = u'2012-2023, Nicola Iarocci' 61 | 62 | # The version info for the project you're documenting, acts as replacement for 63 | # |version| and |release|, also used in various other places throughout the 64 | # built documents. 65 | # 66 | # The full version, including alpha/beta/rc tags. 67 | release = __import__('cerberus').__version__ 68 | # The short X.Y version. 69 | version = release.split('-dev')[0] 70 | 71 | # The language for content autogenerated by Sphinx. Refer to documentation 72 | # for a list of supported languages. 73 | # language = None 74 | 75 | # There are two options for replacing |today|: either, you set today to some 76 | # non-false value, then it is used: 77 | # today = '' 78 | # Else, today_fmt is used as the format for a strftime call. 79 | # today_fmt = '%B %d, %Y' 80 | 81 | # List of patterns, relative to source directory, that match files and 82 | # directories to ignore when looking for source files. 83 | exclude_patterns = ['_build', 'includes'] 84 | 85 | # The reST default role (used for this markup: `text`) to use for all documents. 86 | # default_role = None 87 | 88 | # If true, '()' will be appended to :func: etc. cross-reference text. 89 | # add_function_parentheses = True 90 | 91 | # If true, the current module name will be prepended to all description 92 | # unit titles (such as .. function::). 93 | # add_module_names = True 94 | 95 | # If true, sectionauthor and moduleauthor directives will be shown in the 96 | # output. They are ignored by default. 97 | # show_authors = False 98 | 99 | # The name of the Pygments (syntax highlighting) style to use. 100 | pygments_style = "sphinx" 101 | pygments_dark_style = "monokai" 102 | 103 | # A list of ignored prefixes for module index sorting. 104 | # modindex_common_prefix = [] 105 | 106 | 107 | # -- Options for HTML output --------------------------------------------------- 108 | 109 | # The theme to use for HTML and HTML Help pages. See the documentation for 110 | # a list of builtin themes. 111 | html_theme = "furo" 112 | 113 | # Theme options are theme-specific and customize the look and feel of a theme 114 | # further. For a list of options available for each theme, see the 115 | # documentation. 116 | html_theme_options = { 117 | "navigation_with_keys": True, 118 | } 119 | 120 | # The name for this set of Sphinx documents. If None, it defaults to 121 | # " v documentation". 122 | html_title = "Cerberus — Data validation for Python" 123 | 124 | # A shorter title for the navigation bar. Default is the same as html_title. 125 | # html_short_title = None 126 | 127 | # The name of an image file (relative to this directory) to place at the top 128 | # of the sidebar. 129 | html_logo = "_static/cerberus.png" 130 | 131 | html_css_files = ["style.css"] 132 | 133 | # The name of an image file (within the static path) to use as favicon of the 134 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 135 | # pixels large. 136 | # html_favicon = None 137 | 138 | # Add any paths that contain custom static files (such as style sheets) here, 139 | # relative to this directory. They are copied after the builtin static files, 140 | # so a file named "default.css" will overwrite the builtin "default.css". 141 | html_static_path = ['_static'] 142 | 143 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 144 | # using the given strftime format. 
145 | # html_last_updated_fmt = '%b %d, %Y' 146 | 147 | # If true, SmartyPants will be used to convert quotes and dashes to 148 | # typographically correct entities. 149 | # html_use_smartypants = True 150 | 151 | # Custom sidebar templates, maps document names to template names. 152 | # html_sidebars = {} 153 | 154 | # Additional templates that should be rendered to pages, maps page names to 155 | # template names. 156 | # html_additional_pages = {} 157 | 158 | # If false, no module index is generated. 159 | # html_domain_indices = True 160 | 161 | # If false, no index is generated. 162 | # html_use_index = True 163 | 164 | # If true, the index is split into individual pages for each letter. 165 | # html_split_index = False 166 | 167 | # If true, links to the reST sources are added to the pages. 168 | # html_show_sourcelink = True 169 | 170 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 171 | # html_show_sphinx = True 172 | 173 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 174 | # html_show_copyright = True 175 | 176 | # If true, an OpenSearch description file will be output, and all pages will 177 | # contain a tag referring to it. The value of this option must be the 178 | # base URL from which the finished HTML is served. 179 | # html_use_opensearch = '' 180 | 181 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 182 | # html_file_suffix = None 183 | 184 | # Output file base name for HTML help builder. 185 | htmlhelp_basename = 'Cerberusdoc' 186 | 187 | 188 | # -- Options for LaTeX output -------------------------------------------------- 189 | 190 | latex_elements = { 191 | # The paper size ('letterpaper' or 'a4paper'). 192 | # 'papersize': 'letterpaper', 193 | # The font size ('10pt', '11pt' or '12pt'). 194 | # 'pointsize': '10pt', 195 | # Additional stuff for the LaTeX preamble. 196 | # 'preamble': '', 197 | } 198 | 199 | # Grouping the document tree into LaTeX files. List of tuples 200 | # (source start file, target name, title, author, documentclass [howto/manual]). 201 | latex_documents = [ 202 | ('index', 'Cerberus.tex', u'Cerberus Documentation', u'Nicola Iarocci', 'manual') 203 | ] 204 | 205 | # The name of an image file (relative to this directory) to place at the top of 206 | # the title page. 207 | # latex_logo = None 208 | 209 | # For "manual" documents, if this is true, then toplevel headings are parts, 210 | # not chapters. 211 | # latex_use_parts = False 212 | 213 | # If true, show page references after internal links. 214 | # latex_show_pagerefs = False 215 | 216 | # If true, show URL addresses after external links. 217 | # latex_show_urls = False 218 | 219 | # Documents to append as an appendix to all manuals. 220 | # latex_appendices = [] 221 | 222 | # If false, no module index is generated. 223 | # latex_domain_indices = True 224 | 225 | 226 | # -- Options for manual page output -------------------------------------------- 227 | 228 | # One entry per manual page. List of tuples 229 | # (source start file, name, description, authors, manual section). 230 | man_pages = [('index', 'cerberus', u'Cerberus Documentation', [u'Nicola Iarocci'], 1)] 231 | 232 | # If true, show URL addresses after external links. 233 | # man_show_urls = False 234 | 235 | 236 | # -- Options for Texinfo output ------------------------------------------------ 237 | 238 | # Grouping the document tree into Texinfo files. 
List of tuples 239 | # (source start file, target name, title, author, 240 | # dir menu entry, description, category) 241 | texinfo_documents = [ 242 | ( 243 | 'index', 244 | 'Cerberus', 245 | u'Cerberus Documentation', 246 | u'Nicola Iarocci', 247 | 'Cerberus', 248 | 'One line description of project.', 249 | 'Miscellaneous', 250 | ) 251 | ] 252 | 253 | # Documents to append as an appendix to all manuals. 254 | # texinfo_appendices = [] 255 | 256 | # If false, no module index is generated. 257 | # texinfo_domain_indices = True 258 | 259 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 260 | # texinfo_show_urls = 'footnote' 261 | 262 | 263 | # -- Options for intersphinx extension ----------------------------------------- 264 | 265 | intersphinx_mapping = { 266 | 'py2': ('https://docs.python.org/2', None), 267 | 'py3': ('https://docs.python.org/3', None), 268 | } 269 | 270 | 271 | # -- Options for doclinks extension -------------------------------------------- 272 | 273 | linkcheck_ignore = [ 274 | r"^#", 275 | r"https://github.com/pyeve/cerberus/(issues|pull)/\d+", 276 | r"https://groups.google.com/forum/#!forum/.*", 277 | r"https://docs.python.org/(2|3)/glossary.html#.*", 278 | r"https://docs.python.org/(2|3)/library/.*", 279 | ] 280 | linkcheck_anchors = True 281 | 282 | # -- Options for doctest extension --------------------------------------------- 283 | 284 | doctest_global_setup = """ 285 | import cerberus 286 | Validator = cerberus.Validator 287 | v = Validator() 288 | """ 289 | 290 | 291 | # -- Options for extlinks extension -------------------------------------------- 292 | 293 | extlinks = {'issue': ('https://github.com/pyeve/cerberus/issues/%s', '#%s')} 294 | -------------------------------------------------------------------------------- /docs/contact.rst: -------------------------------------------------------------------------------- 1 | Contact 2 | ======= 3 | If you’ve scoured the :doc:`prose ` and :doc:`API documentation ` 4 | and still can’t find an answer to your question, below are various support 5 | resources that should help. We do request that you do at least skim the 6 | documentation before posting tickets or mailing list questions, however! 7 | 8 | If you'd like to stay up to date on the community and development of Cerberus, 9 | there are several options: 10 | 11 | Blog 12 | ---- 13 | New releases are usually announced on `my Website `_. 14 | 15 | Mailing List 16 | ------------ 17 | The `mailing list`_ is intended to be a low traffic resource for users, 18 | developers and contributors of both the Cerberus and Eve projects. 19 | 20 | Issues tracker 21 | -------------- 22 | To file new bugs or search existing ones, you may visit `Issues`_ page. This 23 | does require a (free and easy to set up) GitHub account. 24 | 25 | GitHub repository 26 | ----------------- 27 | Of course the best way to track the development of Cerberus is through the 28 | `GitHub repo `_. 29 | 30 | .. _`mailing list`: https://groups.google.com/forum/#!forum/python-eve 31 | .. _`issues`: https://github.com/pyeve/cerberus/issues 32 | -------------------------------------------------------------------------------- /docs/contribute.rst: -------------------------------------------------------------------------------- 1 | How to Contribute 2 | ================= 3 | 4 | There are no plans to develop Cerberus further than the current feature set. 5 | Bug fixes and documentation improvements are welcome and will be published with 6 | yearly service releases. 
7 | 8 | 9 | Making Changes 10 | -------------- 11 | * Fork_ the repository_ on GitHub. 12 | * Create a new topic branch from the ``1.3.x`` branch. 13 | * Make commits of logical units (if needed rebase your feature branch before 14 | submitting it). 15 | * Make sure your commit messages are in the `proper format`_. 16 | * If your commit fixes an open issue, reference it in the commit message. 17 | * Make sure you have added the necessary tests for your changes. 18 | * Run all the tests to ensure nothing else was accidentally broken. 19 | * Install and enable pre-commit_ (``pip install pre-commit``, then ``pre-commit 20 | install``) to ensure style guides and code checks are followed. 21 | * Don't forget to add yourself to the ``AUTHORS`` document. 22 | 23 | These guidelines also apply when helping with documentation (actually, for 24 | typos and minor additions you might choose to `fork and edit`_). 25 | 26 | 27 | Submitting Changes 28 | ------------------ 29 | * Push your changes to the topic branch in your fork of the repository. 30 | * Submit a `Pull Request`_. 31 | * Wait for maintainer feedback. Please be patient. 32 | 33 | 34 | Running the Tests 35 | ----------------- 36 | 37 | The easiest way to get started is to run the tests in your local environment 38 | with pytest_: 39 | 40 | .. code-block:: console 41 | 42 | $ pytest cerberus/tests 43 | 44 | 45 | Testing with other Python versions 46 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 47 | 48 | Before you submit a pull request, make sure your tests and changes run in 49 | all supported Python versions. Instead of creating all those environments by 50 | hand, you can use tox_, which automatically manages virtual environments. Mind 51 | that the interpreters themselves need to be available on the system. 52 | 53 | .. code-block:: console 54 | 55 | $ pip install tox # First time only 56 | $ tox 57 | 58 | This might take some time on the first run as the different virtual environments 59 | are created and dependencies are installed. 60 | 61 | If something goes wrong and one test fails, you might need to run that test in 62 | the specific Python version. You can use the created environments to run some 63 | specific tests. For example, if a test suite fails in Python 3.11: 64 | 65 | .. code-block:: console 66 | 67 | $ tox -e py311 68 | 69 | Have a look at ``tox.ini`` for the available test environments and their setup. 70 | 71 | 72 | Running the benchmarks 73 | ~~~~~~~~~~~~~~~~~~~~~~ 74 | 75 | There's a benchmark suite that you can use to measure how changes impact 76 | Cerberus' performance: 77 | 78 | .. code-block:: console 79 | 80 | $ pytest cerberus/benchmarks 81 | 82 | 83 | Building the HTML-documentation 84 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 85 | 86 | To preview the rendered HTML-documentation you must initially install the 87 | documentation framework and a theme: 88 | 89 | .. code-block:: console 90 | 91 | $ pip install -r docs/requirements.txt 92 | 93 | The HTML build is triggered with: 94 | 95 | .. code-block:: console 96 | 97 | $ make -C docs html 98 | 99 | The result can be accessed by opening ``docs/_build/html/index.html``. 100 | 101 | 102 | .. _Fork: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/fork-a-repo 103 | .. _`fork and edit`: https://github.blog/news-insights/the-library/forking-with-the-edit-button/ 104 | .. _pre-commit: https://pre-commit.com/ 105 | .. _`proper format`: https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html 106 | .. 
_`Pull Request`: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request 107 | .. _pytest: https://pytest.org 108 | .. _repository: https://github.com/pyeve/cerberus 109 | .. _tox: https://tox.readthedocs.io 110 | -------------------------------------------------------------------------------- /docs/customize.rst: -------------------------------------------------------------------------------- 1 | Extending Cerberus 2 | ================== 3 | 4 | Though you can use functions in conjunction with the ``coerce`` and the 5 | ``check_with`` rules, you can easily extend the :class:`~cerberus.Validator` 6 | class with custom ``rules``, ``types``, ``check_with`` handlers, ``coercers`` 7 | and ``default_setters``. 8 | While the function-based style is more suitable for special and one-off uses, 9 | a custom class leverages these possibilities: 10 | 11 | * custom rules can be defined with constraints in a schema 12 | * extending the available :ref:`type` s 13 | * use additional contextual data 14 | * schemas are serializable 15 | 16 | The references in schemas to these custom methods can use space characters 17 | instead of underscores, e.g. ``{'foo': {'check_with': 'is odd'}}`` is an alias 18 | for ``{'foo': {'check_with': 'is_odd'}}``. 19 | 20 | 21 | Custom Rules 22 | ------------ 23 | Suppose that in our use case some values can only be expressed as odd integers, 24 | therefore we decide to add support for a new ``is_odd`` rule to our validation 25 | schema: 26 | 27 | .. testcode:: 28 | 29 | schema = {'amount': {'is odd': True, 'type': 'integer'}} 30 | 31 | This is how we would go about implementing it: 32 | 33 | .. testcode:: 34 | 35 | from cerberus import Validator 36 | 37 | class MyValidator(Validator): 38 | def _validate_is_odd(self, constraint, field, value): 39 | """ Test the oddity of a value. 40 | 41 | The rule's arguments are validated against this schema: 42 | {'type': 'boolean'} 43 | """ 44 | if constraint is True and not bool(value & 1): 45 | self._error(field, "Must be an odd number") 46 | 47 | By subclassing the Cerberus :class:`~cerberus.Validator` class and adding the custom 48 | ``_validate_is_odd`` method, we just enhanced Cerberus to suit our needs. 49 | The custom rule ``is_odd`` is now available in our schema and, what really 50 | matters, we can use it to validate all odd values: 51 | 52 | .. doctest:: 53 | 54 | >>> v = MyValidator(schema) 55 | >>> v.validate({'amount': 10}) 56 | False 57 | >>> v.errors 58 | {'amount': ['Must be an odd number']} 59 | >>> v.validate({'amount': 9}) 60 | True 61 | 62 | As schemas themselves are validated, you can provide constraints as a literal 63 | Python expression in the docstring of the rule's implementing method to 64 | validate the arguments given in a schema for that rule. Either the docstring 65 | contains solely the literal or the literal is placed at the bottom of the 66 | docstring preceded by 67 | ``The rule's arguments are validated against this schema:`` 68 | See the source of the contributed rules for more examples. 69 | 70 | 71 | .. _new-types: 72 | 73 | Custom Data Types 74 | ----------------- 75 | Cerberus supports and validates several standard data types (see :ref:`type`). 76 | When building a custom validator you can add and validate your own data types. 77 | 78 | Additional types can be added on the fly by assigning a 79 | :class:`~cerberus.TypeDefinition` to the designated type name in 80 | :attr:`~cerberus.Validator.types_mapping`: 81 | 82 | .. 
testcode:: 83 | 84 | from decimal import Decimal 85 | 86 | decimal_type = cerberus.TypeDefinition('decimal', (Decimal,), ()) 87 | 88 | Validator.types_mapping['decimal'] = decimal_type 89 | 90 | .. caution:: 91 | 92 | As the ``types_mapping`` property is a mutable type, any change to its 93 | items on an instance will affect its class. 94 | 95 | They can also be defined for subclasses of :class:`~cerberus.Validator`: 96 | 97 | .. testcode:: 98 | 99 | from decimal import Decimal 100 | 101 | decimal_type = cerberus.TypeDefinition('decimal', (Decimal,), ()) 102 | 103 | class MyValidator(Validator): 104 | types_mapping = Validator.types_mapping.copy() 105 | types_mapping['decimal'] = decimal_type 106 | 107 | 108 | .. versionadded:: 0.0.2 109 | 110 | .. versionchanged:: 1.0 111 | The type validation logic changed, see :doc:`upgrading`. 112 | 113 | .. versionchanged:: 1.2 114 | Added the :attr:`~cerberus.Validator.types_mapping` property and marked 115 | methods for testing types as deprecated. 116 | 117 | .. _check-with-rule-methods: 118 | 119 | Methods that can be referenced by the check_with rule 120 | ----------------------------------------------------- 121 | If a validation test doesn't depend on a specified constraint from a schema or 122 | needs to be more complex than a rule should be, it's possible to rather define 123 | it as *value checker* than as a rule. There are two ways to use the 124 | :ref:`check_with rule `. 125 | 126 | One is by extending :class:`~cerberus.Validator` with a method prefixed with 127 | ``_check_with_``. This allows to access the whole context of the validator 128 | instance including arbitrary configuration values and state. To reference such 129 | method using the ``check_with`` rule, simply pass the unprefixed method name as 130 | a string constraint. 131 | 132 | For example, one can define an ``oddity`` validator method as follows: 133 | 134 | .. testcode:: 135 | 136 | class MyValidator(Validator): 137 | def _check_with_oddity(self, field, value): 138 | if not value & 1: 139 | self._error(field, "Must be an odd number") 140 | 141 | Usage would look something like: 142 | 143 | .. testcode:: 144 | 145 | schema = {'amount': {'type': 'integer', 'check_with': 'oddity'}} 146 | 147 | The second option to use the rule is to define a standalone function and pass 148 | it as the constraint. This brings with it the benefit of not having to extend 149 | ``Validator``. To read more about this implementation and see examples check 150 | out the rule's documentation. 151 | 152 | .. _custom-coercer: 153 | 154 | Custom Coercers 155 | --------------- 156 | You can also define custom methods that return a ``coerce`` d value or point to 157 | a method as ``rename_handler``. The method name must be prefixed with 158 | ``_normalize_coerce_``. 159 | 160 | .. testcode:: 161 | 162 | class MyNormalizer(Validator): 163 | def __init__(self, multiplier, *args, **kwargs): 164 | super(MyNormalizer, self).__init__(*args, **kwargs) 165 | self.multiplier = multiplier 166 | 167 | def _normalize_coerce_multiply(self, value): 168 | return value * self.multiplier 169 | 170 | .. doctest:: 171 | 172 | >>> schema = {'foo': {'coerce': 'multiply'}} 173 | >>> document = {'foo': 2} 174 | >>> MyNormalizer(multiplier=2).normalized(document, schema) 175 | {'foo': 4} 176 | 177 | 178 | Custom Default Setters 179 | ---------------------- 180 | Similar to custom rename handlers, it is also possible to create custom default 181 | setters. 182 | 183 | .. 
testcode:: 184 | 185 | from datetime import datetime 186 | 187 | class MyNormalizer(Validator): 188 | def _normalize_default_setter_utcnow(self, document): 189 | return datetime.utcnow() 190 | 191 | .. doctest:: 192 | 193 | >>> schema = {'creation_date': {'type': 'datetime', 'default_setter': 'utcnow'}} 194 | >>> MyNormalizer().normalized({}, schema) 195 | {'creation_date': datetime.datetime(...)} 196 | 197 | 198 | Limitations 199 | ----------- 200 | It may be a bad idea to overwrite particular contributed rules. 201 | 202 | 203 | Attaching Configuration Data And Instantiating Custom Validators 204 | ---------------------------------------------------------------- 205 | It's possible to pass arbitrary configuration values when instantiating a 206 | :class:`~cerberus.Validator` or a subclass as keyword arguments (whose names 207 | are not used by Cerberus). These can be used in all of the handlers described 208 | in this document that have access to the instance. 209 | Cerberus ensures that this data is available in all child instances that may 210 | get spawned during processing. When you implement an ``__init__`` method on 211 | a customized validator, you must ensure that all positional and keyword 212 | arguments are also passed to the parent class' initialization method. Here's 213 | an example pattern: 214 | 215 | .. testcode:: 216 | 217 | class MyValidator(Validator): 218 | def __init__(self, *args, **kwargs): 219 | # assign a configuration value to an instance property 220 | # for convenience 221 | self.additional_context = kwargs.get('additional_context') 222 | # pass all data to the base classes 223 | super(MyValidator, self).__init__(*args, **kwargs) 224 | 225 | # alternatively a dynamic property can be defined, rendering 226 | # the __init__ method unnecessary in this example case 227 | @property 228 | def additional_context(self): 229 | return self._config.get('additional_context', 'bar') 230 | 231 | # an optional property setter if you deal with state 232 | @additional_context.setter 233 | def additional_context(self, value): 234 | self._config["additional_context"] = value 235 | 236 | def _check_with_foo(self, field, value): 237 | make_use_of(self.additional_context) 238 | 239 | .. warning:: 240 | 241 | It is neither recommended to access the ``_config`` property in other 242 | situations than outlined in the sketch above nor to to change its contents 243 | during the processing of a document. Both cases are not tested and are 244 | unlikely to get officially supported. 245 | 246 | .. versionadded:: 0.9 247 | 248 | There's a function :func:`~cerberus.utils.validator_factory` to get a 249 | :class:`Validator` mutant with concatenated docstrings. 250 | 251 | .. versionadded:: 1.0 252 | 253 | 254 | Relevant `Validator`-attributes 255 | ------------------------------- 256 | There are some attributes of a :class:`~cerberus.Validator` that you should be 257 | aware of when writing custom Validators. 258 | 259 | `Validator.document` 260 | ~~~~~~~~~~~~~~~~~~~~ 261 | 262 | A validator accesses the :attr:`~cerberus.Validator.document` property when 263 | fetching fields for validation. It also allows validation of a field to happen 264 | in context of the rest of the document. 265 | 266 | .. versionadded:: 0.7.1 267 | 268 | `Validator.schema` 269 | ~~~~~~~~~~~~~~~~~~ 270 | 271 | Alike, the :attr:`~cerberus.Validator.schema` property holds the used schema. 272 | 273 | .. 
note:: 274 | 275 | This attribute is not the same object that was passed as ``schema`` to the 276 | validator at some point. Also, its content may differ, though it still 277 | represents the initial constraints. It offers the same interface like a 278 | :class:`dict`. 279 | 280 | `Validator._error` 281 | ~~~~~~~~~~~~~~~~~~ 282 | 283 | There are three signatures that are accepted to submit errors to the 284 | ``Validator``'s error stash. If necessary the given information will be parsed 285 | into a new instance of :class:`~cerberus.errors.ValidationError`. 286 | 287 | Full disclosure 288 | ............... 289 | In order to be able to gain complete insight into the context of an error at a 290 | later point, you need to call :meth:`~cerberus.Validator._error` with two 291 | mandatory arguments: 292 | 293 | - the field where the error occurred 294 | - an instance of a :class:`~cerberus.errors.ErrorDefinition` 295 | 296 | For custom rules you need to define an error as ``ErrorDefinition`` with a 297 | unique id and the causing rule that is violated. See :mod:`~cerberus.errors` 298 | for a list of the contributed error definitions. Keep in mind that bit 7 marks 299 | a group error, bit 5 marks an error raised by a validation against different 300 | sets of rules. 301 | 302 | Optionally you can submit further arguments as information. Error handlers 303 | that are targeted for humans will use these as positional arguments when 304 | formatting a message with :py:meth:`str.format`. Serializing handlers will keep 305 | these values in a list. 306 | 307 | .. versionadded:: 1.0 308 | 309 | Simple custom errors 310 | .................... 311 | A simpler form is to call :meth:`~cerberus._error` with the field and a string 312 | as message. However the resulting error will contain no information about the 313 | violated constraint. This is supposed to maintain backward compatibility, but 314 | can also be used when an in-depth error handling isn't needed. 315 | 316 | Multiple errors 317 | ............... 318 | When using child-validators, it is a convenience to submit all their errors 319 | ; which is a list of :class:`~cerberus.errors.ValidationError` instances. 320 | 321 | .. versionadded:: 1.0 322 | 323 | `Validator._get_child_validator` 324 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 325 | 326 | If you need another instance of your :class:`~cerberus.Validator`-subclass, the 327 | :meth:`~cerberus.Validator._get_child_validator`-method returns another 328 | instance that is initiated with the same arguments as ``self`` was. You can 329 | specify overriding keyword-arguments. 330 | As the properties ``document_path`` and ``schema_path`` (see below) are 331 | inherited by the child validator, you can extend these by passing a single 332 | value or values-tuple with the keywords ``document_crumb`` and 333 | ``schema_crumb``. 334 | Study the source code for example usages. 335 | 336 | .. versionadded:: 0.9 337 | 338 | .. versionchanged:: 1.0 339 | Added ``document_crumb`` and ``schema_crumb`` as optional keyword- 340 | arguments. 
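To make this more concrete, here is a minimal sketch of a custom rule that delegates validation of a sub-mapping to a child validator. The rule name ``is_server_config`` and its nested schema are invented for illustration; the sketch only relies on the ``_get_child_validator``, ``document_crumb``/``schema_crumb`` and ``_error`` interfaces described in this document:

.. code-block:: python

    from cerberus import Validator

    class ConfigValidator(Validator):
        def _validate_is_server_config(self, constraint, field, value):
            """ Validate a field as a server-configuration mapping.

            The rule's arguments are validated against this schema:
            {'type': 'boolean'}
            """
            if not constraint:
                return
            # spawn a child validator whose errors are reported relative to this field
            child = self._get_child_validator(
                document_crumb=field,
                schema_crumb=(field, 'is_server_config'),
                schema={'host': {'type': 'string'}, 'port': {'type': 'integer'}},
            )
            if not child.validate(value):
                # hand the child's errors (a list of ValidationError instances) upwards
                self._error(child._errors)
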
341 | 342 | `Validator.root_document`, `.root_schema`, `.root_allow_unknown` & `.root_require_all` 343 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 344 | 345 | A child-validator - as used when validating a ``schema`` - can access the first 346 | generation validator's document and schema that are being processed as well as 347 | the constraints for unknown fields via its ``root_document``, ``root_schema``, 348 | ``root_allow_unknown`` and ``root_require_all`` properties. 349 | 350 | .. versionadded:: 1.0 351 | 352 | .. versionchanged:: 1.3 353 | Added ``root_require_all`` 354 | 355 | `Validator.document_path` & `Validator.schema_path` 356 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 357 | 358 | These properties maintain the path of keys within the document and the schema, 359 | respectively, that was traversed by possible parent-validators. Both will be used 360 | as the base path when an error is submitted. 361 | 362 | .. versionadded:: 1.0 363 | 364 | `Validator.recent_error` 365 | ~~~~~~~~~~~~~~~~~~~~~~~~ 366 | 367 | The last single error that was submitted is accessible through the 368 | ``recent_error``-attribute. 369 | 370 | .. versionadded:: 1.0 371 | 372 | `Validator.mandatory_validations`, `Validator.priority_validations` & `Validator._remaining_rules` 373 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 374 | 375 | You can use these class properties and the instance property if you want 376 | to adjust the validation logic for each field validation. 377 | ``mandatory_validations`` is a tuple that contains rules that will be validated 378 | for each field, regardless of whether the rule is defined for a field in a schema 379 | or not. 380 | ``priority_validations`` is a tuple of ordered rules that will be validated 381 | before any other. 382 | ``_remaining_rules`` is a list that is populated under consideration of these 383 | and keeps track of the rules that are next in line to be evaluated. Thus it can 384 | be manipulated by rule handlers to change the remaining validation for the 385 | current field. 386 | Preferably you would call :meth:`~cerberus.Validator._drop_remaining_rules` 387 | to remove particular rules or all at once. 388 | 389 | .. versionadded:: 1.0 390 | 391 | .. versionchanged:: 1.2 392 | Added ``_remaining_rules`` for extended leverage. 393 | -------------------------------------------------------------------------------- /docs/errors.rst: -------------------------------------------------------------------------------- 1 | Errors & Error Handling 2 | ======================= 3 | 4 | Errors can be evaluated via Python interfaces or be processed into different 5 | output formats with error handlers. 6 | 7 | 8 | Error Handlers 9 | -------------- 10 | 11 | Error handlers will return different output via the 12 | :attr:`~cerberus.Validator.errors` property of a validator after the processing 13 | of a document. They are based on :class:`~cerberus.errors.BaseErrorHandler`, which 14 | defines the mandatory interface. The error handler to be used can be passed as 15 | keyword-argument ``error_handler`` to the initialization of a validator or by 16 | setting its property with the same name at any time. On initialization either 17 | an instance or a class can be provided. To pass keyword-arguments to the 18 | initialization of a class, provide a two-value tuple with the error handler 19 | class and the dictionary containing the arguments.
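As a minimal sketch of these options (the ``MyErrorHandler`` class and its ``prefix`` keyword argument are made up for illustration; only the ways of passing a handler are taken from the paragraph above):

.. code-block:: python

    from cerberus import Validator
    from cerberus.errors import BasicErrorHandler

    class MyErrorHandler(BasicErrorHandler):
        # a hypothetical handler that accepts an extra keyword argument
        def __init__(self, *args, prefix='', **kwargs):
            super().__init__(*args, **kwargs)
            self.prefix = prefix

    schema = {'cats': {'type': 'integer'}}

    # a class or an instance can be passed on initialization ...
    v = Validator(schema, error_handler=MyErrorHandler)
    # ... as can a (class, kwargs) tuple when the handler needs arguments
    v = Validator(schema, error_handler=(MyErrorHandler, {'prefix': 'cerberus: '}))
    # the handler can also be swapped at any time via the property of the same name
    v.error_handler = MyErrorHandler(prefix='! ')
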
20 | 21 | The following handlers are available: 22 | 23 | - :class:`~cerberus.errors.BasicErrorHandler`: This is the **default** that 24 | returns a dictionary. The keys refer to the document's ones and the values 25 | are lists containing error messages. Errors of nested fields are kept in a 26 | dictionary as last item of these lists. 27 | 28 | 29 | Python interfaces 30 | ----------------- 31 | 32 | An error is represented as :class:`~cerberus.errors.ValidationError` that has 33 | the following properties: 34 | 35 | - ``document_path``: The path within the document. For flat dictionaries 36 | this simply is a key's name in a tuple, for nested ones it's all traversed 37 | key names. Items in sequences are represented by their index. 38 | - ``schema_path``: The path within the schema. 39 | - ``code``: The unique identifier for an error. See :ref:`error-codes` for a 40 | list. 41 | - ``rule``: The rule that was evaluated when the error occurred. 42 | - ``constraint``: That rule's constraint. 43 | - ``value``: The value being validated. 44 | - ``info``: This tuple contains additional information that were submitted 45 | with the error. For most errors this is actually nothing. For bulk 46 | validations (e.g. with ``items`` or ``keysrules``) this property keeps 47 | all individual errors. 48 | See the implementation of a rule in the source code to figure out its 49 | additional logging. 50 | 51 | You can access the errors per these properties of a :class:`~cerberus.Validator` 52 | instance after a processing of a document: 53 | 54 | - ``_errors``: This :class:`~cerberus.errors.ErrorsList` instance holds all 55 | submitted errors. It is not intended to manipulate errors directly via this 56 | attribute. You can test if at least one error with a specific error 57 | definition is ``in`` that list. 58 | 59 | - ``document_error_tree``: A ``dict``-like object that allows one to query 60 | nodes corresponding to your document. 61 | The subscript notation on a node allows one to fetch either a specific error 62 | that matches the given :class:`~cerberus.errors.ErrorDefinition` or a child 63 | node with the given key. 64 | If there's no matching error respectively no errors occurred in a node or 65 | below, :obj:`None` will be returned instead. 66 | A node can also be tested with the ``in`` operator with either an 67 | :class:`~cerberus.errors.ErrorDefinition` or a possible child node's key. 68 | A node's errors are contained in its :attr:`errors` property which is also 69 | an :class:`~cerberus.errors.ErrorsList`. Its members are yielded when 70 | iterating over a node. 71 | - ``schema_error_tree``: Similarly for the used schema. 72 | 73 | .. versionchanged:: 1.0 74 | Errors are stored as :class:`~cerberus.errors.ValidationError` in a 75 | :class:`~cerberus.errors.ErrorList`. 76 | 77 | Examples 78 | ~~~~~~~~ 79 | 80 | .. doctest:: 81 | 82 | >>> schema = {'cats': {'type': 'integer'}} 83 | >>> document = {'cats': 'two'} 84 | >>> v.validate(document, schema) 85 | False 86 | >>> cerberus.errors.BAD_TYPE in v._errors 87 | True 88 | >>> v.document_error_tree['cats'].errors == v.schema_error_tree['cats']['type'].errors 89 | True 90 | >>> cerberus.errors.BAD_TYPE in v.document_error_tree['cats'] 91 | True 92 | >>> v.document_error_tree['cats'][cerberus.errors.BAD_TYPE] \ 93 | ... 
== v.document_error_tree['cats'].errors[0] 94 | True 95 | >>> error = v.document_error_tree['cats'].errors[0] 96 | >>> error.document_path 97 | ('cats',) 98 | >>> error.schema_path 99 | ('cats', 'type') 100 | >>> error.rule 101 | 'type' 102 | >>> error.constraint 103 | 'integer' 104 | >>> error.value 105 | 'two' 106 | -------------------------------------------------------------------------------- /docs/external_resources.rst: -------------------------------------------------------------------------------- 1 | External resources 2 | ================== 3 | 4 | Here are some recommended resources that deal with Cerberus. 5 | If you find something interesting on the web, please amend it to this document 6 | and open a pull request (see :doc:`contribute`). 7 | 8 | Community forums 9 | ---------------- 10 | 11 | There's a `cerberus tag `_ 12 | on the Question & Answers platform *Stackoverflow*. The 13 | `Google Group `_ 14 | regarding the mother project *Eve* may also a spot to seek these. 15 | 16 | 7 Best Python Libraries For Validating Data (February 2018) 17 | ----------------------------------------------------------- 18 | 19 | `Clickbait `_ 20 | that mentions Cerberus. It's a starting point to compare libraries with a 21 | similar scope though. 22 | 23 | Nicola Iarocci: Cerberus, or Data Validation for Humans (November 2017) 24 | ----------------------------------------------------------------------- 25 | 26 | Get fastened for the full tour on Cerberus that Nicola gave in a 27 | `talk `_ at PiterPy 2017. 28 | No bit is missed, so don't miss it! 29 | The talk also includes a sample of the actual pronunciation of Iarocci as 30 | extra takeaway. 31 | 32 | Henry Ölsner: Validate JSON data using cerberus (March 2016) 33 | ------------------------------------------------------------ 34 | 35 | In this `blog post `_ 36 | the author describes how to validate network configurations with a schema noted 37 | in YAML. The article that doesn't spare on code snippets develops the 38 | resulting schema by gradually increasing its complexity. A custom type check is 39 | also implemented, but be aware that version *0.9.2* is used. With 1.0 and later 40 | the implementation should look like this: 41 | 42 | .. code-block:: python 43 | 44 | def _validate_type_ipv4address(self, value): 45 | try: 46 | ipaddress.IPv4Address(value) 47 | except: 48 | return False 49 | else: 50 | return True 51 | -------------------------------------------------------------------------------- /docs/faq.rst: -------------------------------------------------------------------------------- 1 | Frequently Asked Questions 2 | ========================== 3 | 4 | Can I use Cerberus to validate objects? 5 | --------------------------------------- 6 | 7 | Yes. See `Validating user objects with Cerberus `_. 8 | 9 | Are Cerberus validators thread-safe, can they be used in different threads? 10 | --------------------------------------------------------------------------- 11 | 12 | The normalization and validation methods of validators make a copy of the 13 | provided document and store it as :attr:`~cerberus.Validator.document` 14 | property. Because of this it is advised to create a new 15 | :class:`~cerberus.Validator` instance for each processed document when used in 16 | a multi-threaded context. Alternatively you can use a 17 | :class:`py3:threading.Lock` to confirm that only one document processing is 18 | running at any given time. 
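A rough sketch of the latter approach; the names and the schema are illustrative only:

.. code-block:: python

    import threading

    from cerberus import Validator

    schema = {'name': {'type': 'string'}}
    shared_validator = Validator(schema)
    lock = threading.Lock()

    def validate(document):
        # guard the shared instance so only one document is processed at a time;
        # alternatively, instantiate Validator(schema) locally in each thread
        with lock:
            return shared_validator.validate(document)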
19 | -------------------------------------------------------------------------------- /docs/includes/.gitignore: -------------------------------------------------------------------------------- 1 | *.rst 2 | -------------------------------------------------------------------------------- /docs/includes/generate.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import importlib 4 | from operator import attrgetter 5 | from pathlib import Path 6 | from pprint import pformat 7 | from textwrap import indent 8 | from types import SimpleNamespace 9 | 10 | 11 | INCLUDES_DIR = Path(__file__).parent.resolve() 12 | CERBERUS_DIR = INCLUDES_DIR.parent.parent / 'cerberus' 13 | 14 | 15 | def load_module_members(name, path): 16 | module_spec = importlib.util.spec_from_file_location(name, path) 17 | _module = importlib.util.module_from_spec(module_spec) 18 | module_spec.loader.exec_module(_module) 19 | return vars(_module) 20 | 21 | 22 | errors_module = load_module_members('errors', CERBERUS_DIR / 'errors.py') 23 | error_type = errors_module['ErrorDefinition'] 24 | error_definitions = [] 25 | for name, member in errors_module.items(): 26 | if not isinstance(member, error_type): 27 | continue 28 | error_definition = SimpleNamespace(**member._asdict()) 29 | error_definition.name = name 30 | error_definitions.append(error_definition) 31 | error_definitions.sort(key=attrgetter('code')) 32 | 33 | with (INCLUDES_DIR / 'error-codes.rst').open('wt') as f: 34 | print( 35 | """ 36 | .. list-table:: 37 | :header-rows: 1 38 | 39 | * - Code (dec.) 40 | - Code (hex.) 41 | - Name 42 | - Rule""".lstrip( 43 | '\n' 44 | ), 45 | file=f, 46 | ) 47 | for error_definition in error_definitions: 48 | print( 49 | f""" 50 | * - {error_definition.code} 51 | - {hex(error_definition.code)} 52 | - {error_definition.name} 53 | - {error_definition.rule}""".lstrip( 54 | '\n' 55 | ), 56 | file=f, 57 | ) 58 | 59 | print('Generated table with ErrorDefinitions.') 60 | 61 | 62 | validator_module = load_module_members('validator', CERBERUS_DIR / 'validator.py') 63 | validator = validator_module['Validator']() 64 | schema_validation_schema = pformat( 65 | validator.rules, width=68 66 | ) # width seems w/o effect, use black? 67 | with (INCLUDES_DIR / 'schema-validation-schema.rst').open('wt') as f: 68 | print( 69 | '.. code-block:: python\n\n', indent(schema_validation_schema, ' '), file=f 70 | ) 71 | 72 | print("Generated schema for a vanilla validator's, well, schema.") 73 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | Welcome to Cerberus 2 | =================== 3 | 4 | ``CERBERUS``, n. The watch-dog of Hades, whose duty it was to guard the 5 | entrance; everybody, sooner or later, had to go there, and nobody wanted to 6 | carry off the entrance. 7 | - *Ambrose Bierce, The Devil's Dictionary* 8 | 9 | Cerberus provides powerful yet simple and lightweight data validation 10 | functionality out of the box and is designed to be easily extensible, allowing 11 | for custom validation. It has no dependencies and is thoroughly. 
12 | 13 | At a Glance 14 | ----------- 15 | You define a validation schema and pass it to an instance of the 16 | :class:`~cerberus.Validator` class: :: 17 | 18 | >>> schema = {'name': {'type': 'string'}} 19 | >>> v = Validator(schema) 20 | 21 | Then you simply invoke the :meth:`~cerberus.Validator.validate` to validate 22 | a dictionary against the schema. If validation succeeds, ``True`` is returned: 23 | 24 | :: 25 | 26 | >>> document = {'name': 'john doe'} 27 | >>> v.validate(document) 28 | True 29 | 30 | 31 | Table of Contents 32 | ----------------- 33 | .. toctree:: 34 | :maxdepth: 2 35 | 36 | Installation 37 | Usage 38 | schemas 39 | validation-rules 40 | normalization-rules 41 | errors 42 | Extending 43 | Contributing 44 | API 45 | FAQ 46 | external_resources 47 | changelog 48 | upgrading 49 | authors 50 | contact 51 | license 52 | 53 | Copyright Notice 54 | ---------------- 55 | Cerberus is an open source project by `Nicola Iarocci 56 | `_. See the original `LICENSE 57 | `_ for more 58 | information. 59 | 60 | .. _`Cerberus campaign on Patreon`: https://www.patreon.com/nicolaiarocci 61 | -------------------------------------------------------------------------------- /docs/install.rst: -------------------------------------------------------------------------------- 1 | Cerberus Installation 2 | ===================== 3 | 4 | This part of the documentation covers the installation of Cerberus. The first 5 | step to using any software package is getting it properly installed. Please 6 | refer to one of the many established ways to work in project-specific virtual 7 | environments, i.e. the `Virtual Environments and Packages`_ section of the 8 | Pyton documentation. 9 | 10 | 11 | Stable Version 12 | -------------- 13 | 14 | Cerberus is on the PyPI_ so all you need to do is: 15 | 16 | .. code-block:: console 17 | 18 | $ pip install cerberus 19 | 20 | 21 | Development Version 22 | ------------------- 23 | 24 | Obtain the source (either as source distribution from the PyPI, with ``git`` or 25 | other means that the Github platform provides) and use the following command 26 | in the source's root directory for an editable installation. Subsequent changes 27 | to the source code will affect its following execution without re-installation. 28 | 29 | .. code-block:: console 30 | 31 | $ pip install -e . 32 | 33 | 34 | .. _GitHub Repository: https://github.com/pyeve/cerberus 35 | .. _PyPI: https://pypi.org/project/Cerberus 36 | .. _Virtual Environments and Packages: https://docs.python.org/3/tutorial/venv.html 37 | -------------------------------------------------------------------------------- /docs/license.rst: -------------------------------------------------------------------------------- 1 | License 2 | ======= 3 | 4 | Cerberus is an open source project by `Nicola Iarocci `_. 5 | 6 | .. include:: ../LICENSE 7 | -------------------------------------------------------------------------------- /docs/normalization-rules.rst: -------------------------------------------------------------------------------- 1 | Normalization Rules 2 | =================== 3 | 4 | Normalization rules are applied to fields, also in ``schema`` for mappings, as 5 | well when defined as a bulk operation by ``schema`` (for sequences), 6 | ``allow_unknown``, ``keysrules`` and ``valuesrules``. Normalization rules 7 | in definitions for testing variants like with ``anyof`` are not processed. 8 | 9 | The normalizations are applied as given in this document for each level in the 10 | mapping, traversing depth-first. 
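For example, normalization rules that are defined in a subdocument's ``schema`` are applied on that nested level as well; the field names in this sketch are arbitrary:

.. code-block:: python

    from cerberus import Validator

    v = Validator({
        'order': {
            'type': 'dict',
            'schema': {'amount': {'type': 'integer', 'coerce': int}},
        }
    })

    # the coercion defined for the nested field is applied during normalization
    assert v.normalized({'order': {'amount': '2'}}) == {'order': {'amount': 2}}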
11 | 12 | Renaming Of Fields 13 | ------------------ 14 | You can define a field to be renamed before any further processing. 15 | 16 | .. doctest:: 17 | 18 | >>> v = Validator({'foo': {'rename': 'bar'}}) 19 | >>> v.normalized({'foo': 0}) 20 | {'bar': 0} 21 | 22 | To let a callable rename a field or arbitrary fields, you can define a handler 23 | for renaming. If the constraint is a string, it points to a 24 | :doc:`custom method `. If the constraint is an iterable, the value 25 | is processed through that chain. 26 | 27 | .. doctest:: 28 | 29 | >>> v = Validator({}, allow_unknown={'rename_handler': int}) 30 | >>> v.normalized({'0': 'foo'}) 31 | {0: 'foo'} 32 | 33 | .. doctest:: 34 | 35 | >>> even_digits = lambda x: '0' + x if len(x) % 2 else x 36 | >>> v = Validator({}, allow_unknown={'rename_handler': [str, even_digits]}) 37 | >>> v.normalized({1: 'foo'}) 38 | {'01': 'foo'} 39 | 40 | 41 | .. versionadded:: 1.0 42 | 43 | .. _purging-unknown-fields: 44 | 45 | Purging Unknown Fields 46 | ---------------------- 47 | After renaming, unknown fields will be purged if the 48 | :attr:`~cerberus.Validator.purge_unknown` property of a 49 | :class:`~cerberus.Validator` instance is ``True``; it defaults to ``False``. 50 | You can set the property per keyword-argument upon initialization or as rule for 51 | subdocuments like ``allow_unknown`` (see :ref:`allowing-the-unknown`). The default is 52 | ``False``. 53 | If a subdocument includes an ``allow_unknown`` rule then unknown fields 54 | will not be purged on that subdocument. 55 | 56 | .. doctest:: 57 | 58 | >>> v = Validator({'foo': {'type': 'string'}}, purge_unknown=True) 59 | >>> v.normalized({'bar': 'foo'}) 60 | {} 61 | 62 | .. versionadded:: 1.0 63 | 64 | .. _default-values: 65 | 66 | Default Values 67 | -------------- 68 | You can set default values for missing fields in the document by using the ``default`` rule. 69 | 70 | .. doctest:: 71 | 72 | >>> v.schema = {'amount': {'type': 'integer'}, 'kind': {'type': 'string', 'default': 'purchase'}} 73 | >>> v.normalized({'amount': 1}) == {'amount': 1, 'kind': 'purchase'} 74 | True 75 | 76 | >>> v.normalized({'amount': 1, 'kind': None}) == {'amount': 1, 'kind': 'purchase'} 77 | True 78 | 79 | >>> v.normalized({'amount': 1, 'kind': 'other'}) == {'amount': 1, 'kind': 'other'} 80 | True 81 | 82 | You can also define a default setter callable to set the default value 83 | dynamically. The callable gets called with the current (sub)document as the 84 | only argument. Callables can even depend on one another, but normalizing will 85 | fail if there is a unresolvable/circular dependency. If the constraint is a 86 | string, it points to a :doc:`custom method `. 87 | 88 | .. doctest:: 89 | 90 | >>> v.schema = {'a': {'type': 'integer'}, 'b': {'type': 'integer', 'default_setter': lambda doc: doc['a'] + 1}} 91 | >>> v.normalized({'a': 1}) == {'a': 1, 'b': 2} 92 | True 93 | 94 | >>> v.schema = {'a': {'type': 'integer', 'default_setter': lambda doc: doc['not_there']}} 95 | >>> v.normalized({}) 96 | >>> v.errors 97 | {'a': ["default value for 'a' cannot be set: Circular dependencies of default setters."]} 98 | 99 | You can even use both ``default`` and :ref:`readonly` on the same field. This 100 | will create a field that cannot be assigned a value manually but it will be 101 | automatically supplied with a default value by Cerberus. Of course the same 102 | applies for ``default_setter``. 103 | 104 | .. versionchanged:: 1.0.2 105 | Can be used in conjunction with :ref:`readonly`. 106 | 107 | .. 
versionadded:: 1.0 108 | 109 | .. _type-coercion: 110 | 111 | Value Coercion 112 | -------------- 113 | Coercion allows you to apply a callable (given as object or the name of a 114 | :ref:`custom coercion method `) to a value before the document 115 | is validated. The return value of the callable replaces the new value in the 116 | document. This can be used to convert values or sanitize data before it is 117 | validated. If the constraint is an iterable of callables and names, the value 118 | is processed through that chain of coercers. 119 | 120 | .. doctest:: 121 | 122 | >>> v.schema = {'amount': {'type': 'integer'}} 123 | >>> v.validate({'amount': '1'}) 124 | False 125 | 126 | >>> v.schema = {'amount': {'type': 'integer', 'coerce': int}} 127 | >>> v.validate({'amount': '1'}) 128 | True 129 | >>> v.document 130 | {'amount': 1} 131 | 132 | >>> to_bool = lambda v: v.lower() in ('true', '1') 133 | >>> v.schema = {'flag': {'type': 'boolean', 'coerce': (str, to_bool)}} 134 | >>> v.validate({'flag': 'true'}) 135 | True 136 | >>> v.document 137 | {'flag': True} 138 | 139 | .. versionadded:: 0.9 140 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | furo 2 | -------------------------------------------------------------------------------- /docs/schemas.rst: -------------------------------------------------------------------------------- 1 | Validation Schemas 2 | ================== 3 | 4 | A validation schema is a :term:`mapping`, usually a :class:`dict`. Schema keys 5 | are the keys allowed in the target dictionary. Schema values express the rules 6 | that must be matched by the corresponding target values. 7 | 8 | .. testcode:: 9 | 10 | schema = {'name': {'type': 'string', 'maxlength': 10}} 11 | 12 | In the example above we define a target dictionary with only one key, ``name``, 13 | which is expected to be a string not longer than 10 characters. Something like 14 | ``{'name': 'john doe'}`` would validate, while something like ``{'name': 'a 15 | very long string'}`` or ``{'name': 99}`` would not. 16 | 17 | By default all keys in a document are optional unless the :ref:`required`-rule 18 | is set ``True`` for individual fields or the validator's :attr:~cerberus.Validator.require_all 19 | is set to ``True`` in order to expect all schema-defined fields to be present in the document. 20 | 21 | 22 | Registries 23 | ---------- 24 | 25 | There are two default registries in the cerberus module namespace where you can 26 | store definitions for schemas and rules sets which then can be referenced in a 27 | validation schema. You can furthermore instantiate more 28 | :class:`~cerberus.Registry` objects and bind them to the 29 | :attr:`~cerberus.Validator.rules_set_registry` or 30 | :attr:`~cerberus.Validator.schema_registry` of a validator. You may also set 31 | these as keyword-arguments upon intitialization. 32 | 33 | Using registries is particularly interesting if 34 | 35 | - schemas shall include references to themselves, vulgo: schema recursion 36 | - schemas contain a lot of reused parts and are supposed to be 37 | :ref:`serialized ` 38 | 39 | 40 | .. doctest:: 41 | 42 | >>> from cerberus import schema_registry 43 | >>> schema_registry.add('non-system user', 44 | ... {'uid': {'min': 1000, 'max': 0xffff}}) 45 | >>> schema = {'sender': {'schema': 'non-system user', 46 | ... 'allow_unknown': True}, 47 | ... 'receiver': {'schema': 'non-system user', 48 | ... 
'allow_unknown': True}} 49 | 50 | .. doctest:: 51 | 52 | >>> from cerberus import rules_set_registry 53 | >>> rules_set_registry.extend((('boolean', {'type': 'boolean'}), 54 | ... ('booleans', {'valuesrules': 'boolean'}))) 55 | >>> schema = {'foo': 'booleans'} 56 | 57 | 58 | Validation 59 | ---------- 60 | 61 | Validation schemas themselves are validated when passed to the validator or a 62 | new set of rules is set for a document's field. A :exc:`~cerberus.SchemaError` 63 | is raised when an invalid validation schema is encountered. See 64 | :ref:`schema-validation-schema` for a reference. 65 | 66 | However, be aware that no validation can be triggered for all changes below 67 | that level or when a used definition in a registry changes. You could therefore 68 | trigger a validation and catch the exception: 69 | 70 | >>> v = Validator({'foo': {'allowed': []}}) 71 | >>> v.schema['foo'] = {'allowed': 1} 72 | Traceback (most recent call last): 73 | File "", line 1, in 74 | File "cerberus/schema.py", line 99, in __setitem__ 75 | self.validate({key: value}) 76 | File "cerberus/schema.py", line 126, in validate 77 | self._validate(schema) 78 | File "cerberus/schema.py", line 141, in _validate 79 | raise SchemaError(self.schema_validator.errors) 80 | SchemaError: {'foo': {'allowed': 'must be of container type'}} 81 | >>> v.schema['foo']['allowed'] = 'strings are no valid constraint for allowed' 82 | >>> v.schema.validate() 83 | Traceback (most recent call last): 84 | File "", line 1, in 85 | File "cerberus/schema.py", line 126, in validate 86 | self._validate(schema) 87 | File "cerberus/schema.py", line 141, in _validate 88 | raise SchemaError(self.schema_validator.errors) 89 | SchemaError: {'foo': {'allowed': 'must be of container type'}} 90 | 91 | 92 | .. _schema-serialization: 93 | 94 | Serialization 95 | ------------- 96 | 97 | Cerberus schemas are built with vanilla Python types: ``dict``, ``list``, 98 | ``string``, etc. Even user-defined validation rules are invoked in the schema 99 | by name as a string. A useful side effect of this design is that schemas can 100 | be defined in a number of ways, for example with PyYAML_. 101 | 102 | .. doctest:: 103 | 104 | >>> import yaml 105 | >>> schema_text = ''' 106 | ... name: 107 | ... type: string 108 | ... age: 109 | ... type: integer 110 | ... min: 10 111 | ... ''' 112 | >>> schema = yaml.safe_load(schema_text) 113 | >>> document = {'name': 'Little Joe', 'age': 5} 114 | >>> v.validate(document, schema) 115 | False 116 | >>> v.errors 117 | {'age': ['min value is 10']} 118 | 119 | You don't have to use YAML of course, you can use your favorite serializer. 120 | :mod:`json` for example. As long as there is a decoder that can produce a nested 121 | ``dict``, you can use it to define a schema. 122 | 123 | For populating and dumping one of the registries, use 124 | :meth:`~cerberus.Registry.extend` and :meth:`~cerberus.Registry.all`. 125 | 126 | .. _PyYAML: https://pyyaml.org 127 | -------------------------------------------------------------------------------- /docs/upgrading.rst: -------------------------------------------------------------------------------- 1 | .. 
include:: ../UPGRADING.rst 2 | -------------------------------------------------------------------------------- /docs/usage.rst: -------------------------------------------------------------------------------- 1 | Cerberus Usage 2 | ============== 3 | 4 | Basic Usage 5 | ----------- 6 | You define a validation schema and pass it to an instance of the 7 | :class:`~cerberus.Validator` class: 8 | 9 | .. doctest:: 10 | 11 | >>> schema = {'name': {'type': 'string'}} 12 | >>> v = Validator(schema) 13 | 14 | Then you simply invoke the :meth:`~cerberus.Validator.validate` to validate 15 | a dictionary against the schema. If validation succeeds, ``True`` is returned: 16 | 17 | .. testsetup:: 18 | 19 | schema = {'name': {'type': 'string'}} 20 | v = Validator(schema) 21 | document = {'name': 'john doe'} 22 | 23 | .. doctest:: 24 | 25 | >>> document = {'name': 'john doe'} 26 | >>> v.validate(document) 27 | True 28 | 29 | Alternatively, you can pass both the dictionary and the schema to the 30 | :meth:`~cerberus.Validator.validate` method: 31 | 32 | .. doctest:: 33 | 34 | >>> v = Validator() 35 | >>> v.validate(document, schema) 36 | True 37 | 38 | Which can be handy if your schema is changing through the life of the 39 | instance. 40 | 41 | Details about validation schemas are covered in :doc:`schemas`. 42 | See :doc:`validation-rules` and :doc:`normalization-rules` for an extensive 43 | documentation of all supported rules. 44 | 45 | Unlike other validation tools, Cerberus will not halt and raise an exception on 46 | the first validation issue. The whole document will always be processed, and 47 | ``False`` will be returned if validation failed. You can then access the 48 | :attr:`~cerberus.Validator.errors` property to obtain a list of issues. See 49 | :doc:`Errors & Error Handling ` for different output options. 50 | 51 | .. doctest:: 52 | 53 | >>> schema = {'name': {'type': 'string'}, 'age': {'type': 'integer', 'min': 10}} 54 | >>> document = {'name': 'Little Joe', 'age': 5} 55 | >>> v.validate(document, schema) 56 | False 57 | >>> v.errors 58 | {'age': ['min value is 10']} 59 | 60 | A :exc:`~cerberus.DocumentError` is raised when the document is not a mapping. 61 | 62 | The Validator class and its instances are callable, allowing for the following 63 | shorthand syntax: 64 | 65 | .. doctest:: 66 | 67 | >>> document = {'name': 'john doe'} 68 | >>> v(document) 69 | True 70 | 71 | .. versionadded:: 0.4.1 72 | 73 | 74 | .. _allowing-the-unknown: 75 | 76 | Allowing the Unknown 77 | -------------------- 78 | By default only keys defined in the schema are allowed: 79 | 80 | .. doctest:: 81 | 82 | >>> schema = {'name': {'type': 'string', 'maxlength': 10}} 83 | >>> v.validate({'name': 'john', 'sex': 'M'}, schema) 84 | False 85 | >>> v.errors 86 | {'sex': ['unknown field']} 87 | 88 | However, you can allow unknown document keys pairs by either setting 89 | ``allow_unknown`` to ``True``: 90 | 91 | .. doctest:: 92 | 93 | >>> v.schema = {} 94 | >>> v.allow_unknown = True 95 | >>> v.validate({'name': 'john', 'sex': 'M'}) 96 | True 97 | 98 | Or you can set ``allow_unknown`` to a validation schema, in which case 99 | unknown fields will be validated against it: 100 | 101 | .. 
doctest:: 102 | 103 | >>> v.schema = {} 104 | >>> v.allow_unknown = {'type': 'string'} 105 | >>> v.validate({'an_unknown_field': 'john'}) 106 | True 107 | >>> v.validate({'an_unknown_field': 1}) 108 | False 109 | >>> v.errors 110 | {'an_unknown_field': ['must be of string type']} 111 | 112 | ``allow_unknown`` can also be set at initialization: 113 | 114 | .. doctest:: 115 | 116 | >>> v = Validator({}, allow_unknown=True) 117 | >>> v.validate({'name': 'john', 'sex': 'M'}) 118 | True 119 | >>> v.allow_unknown = False 120 | >>> v.validate({'name': 'john', 'sex': 'M'}) 121 | False 122 | 123 | ``allow_unknown`` can also be set as rule to configure a validator for a nested 124 | mapping that is checked against the :ref:`schema ` rule: 125 | 126 | .. doctest:: 127 | 128 | >>> v = Validator() 129 | >>> v.allow_unknown 130 | False 131 | 132 | >>> schema = { 133 | ... 'name': {'type': 'string'}, 134 | ... 'a_dict': { 135 | ... 'type': 'dict', 136 | ... 'allow_unknown': True, # this overrides the behaviour for 137 | ... 'schema': { # the validation of this definition 138 | ... 'address': {'type': 'string'} 139 | ... } 140 | ... } 141 | ... } 142 | 143 | >>> v.validate({'name': 'john', 144 | ... 'a_dict': {'an_unknown_field': 'is allowed'}}, 145 | ... schema) 146 | True 147 | 148 | >>> # this fails as allow_unknown is still False for the parent document. 149 | >>> v.validate({'name': 'john', 150 | ... 'an_unknown_field': 'is not allowed', 151 | ... 'a_dict':{'an_unknown_field': 'is allowed'}}, 152 | ... schema) 153 | False 154 | 155 | >>> v.errors 156 | {'an_unknown_field': ['unknown field']} 157 | 158 | .. versionchanged:: 0.9 159 | ``allow_unknown`` can also be set for nested dict fields. 160 | 161 | .. versionchanged:: 0.8 162 | ``allow_unknown`` can also be set to a validation schema. 163 | 164 | 165 | .. _requiring-all: 166 | 167 | Requiring all 168 | ------------- 169 | 170 | By default any keys defined in the schema are not required. 171 | However, you can require all document keys pairs by setting 172 | ``require_all`` to ``True`` at validator initialization (``v = Validator(…, require_all=True)``) 173 | or change it latter via attribute access (``v.require_all = True``). 174 | ``require_all`` can also be set :ref:`as rule ` to configure a 175 | validator for a subdocument that is checked against the 176 | :ref:`schema ` rule: 177 | 178 | .. doctest:: 179 | 180 | >>> v = Validator() 181 | >>> v.require_all 182 | False 183 | 184 | >>> schema = { 185 | ... 'name': {'type': 'string'}, 186 | ... 'a_dict': { 187 | ... 'type': 'dict', 188 | ... 'require_all': True, 189 | ... 'schema': { 190 | ... 'address': {'type': 'string'} 191 | ... } 192 | ... } 193 | ... } 194 | 195 | >>> v.validate({'name': 'foo', 'a_dict': {}}, schema) 196 | False 197 | >>> v.errors 198 | {'a_dict': [{'address': ['required field']}]} 199 | 200 | >>> v.validate({'a_dict': {'address': 'foobar'}}, schema) 201 | True 202 | 203 | .. versionadded:: 1.3 204 | 205 | Fetching Processed Documents 206 | ---------------------------- 207 | 208 | The normalization and coercion are performed on the copy of the original 209 | document and the result document is available via ``document``-property. 210 | 211 | .. doctest:: 212 | 213 | >>> v.schema = {'amount': {'type': 'integer', 'coerce': int}} 214 | >>> v.validate({'amount': '1'}) 215 | True 216 | >>> v.document 217 | {'amount': 1} 218 | 219 | Beside the ``document``-property a ``Validator``-instance has shorthand methods 220 | to process a document and fetch its processed result. 
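As stated above, processing works on a copy, so the passed document itself is left untouched; a minimal sketch:

.. code-block:: python

    from cerberus import Validator

    v = Validator({'amount': {'type': 'integer', 'coerce': int}})
    original = {'amount': '1'}

    assert v.validate(original)
    assert v.document == {'amount': 1}    # the processed copy
    assert original == {'amount': '1'}    # the input document is unchanged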
221 | 222 | `validated` Method 223 | ~~~~~~~~~~~~~~~~~~ 224 | There's a wrapper-method :meth:`~cerberus.Validator.validated` that returns the 225 | validated document. If the document didn't validate :obj:`None` is returned, 226 | unless you call the method with the keyword argument ``always_return_document`` 227 | set to ``True``. 228 | It can be useful for flows like this: 229 | 230 | .. testsetup:: 231 | 232 | documents = () 233 | 234 | .. testcode:: 235 | 236 | v = Validator(schema) 237 | valid_documents = [x for x in [v.validated(y) for y in documents] 238 | if x is not None] 239 | 240 | If a coercion callable or method raises an exception then the exception will 241 | be caught and the validation with fail. 242 | 243 | .. versionadded:: 0.9 244 | 245 | `normalized` Method 246 | ~~~~~~~~~~~~~~~~~~~ 247 | Similarly, the :meth:`~cerberus.Validator.normalized` method returns a 248 | normalized copy of a document without validating it: 249 | 250 | .. doctest:: 251 | 252 | >>> schema = {'amount': {'coerce': int}} 253 | >>> document = {'model': 'consumerism', 'amount': '1'} 254 | >>> normalized_document = v.normalized(document, schema) 255 | >>> type(normalized_document['amount']) 256 | 257 | 258 | .. versionadded:: 1.0 259 | 260 | 261 | Warnings 262 | -------- 263 | 264 | Warnings, such as about deprecations or likely causes of trouble, are issued 265 | through the Python standard library's :mod:`warnings` module. The logging 266 | module can be configured to catch these :func:`logging.captureWarnings`. 267 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "Cerberus" 7 | version = "1.3.7" 8 | description = """\ 9 | Lightweight, extensible schema and data validation tool for Python\ 10 | dictionaries.\ 11 | """ 12 | authors = [ 13 | {name = "Nicola Iarocci", email = "nicola@nicolaiarocci.com"}, 14 | ] 15 | maintainers = [ 16 | {name = "Frank Sachsenheim", email = "funkyfuture@riseup.net"}, 17 | ] 18 | license = {file = "LICENSE"} 19 | readme = "README.rst" 20 | classifiers = [ 21 | "Development Status :: 5 - Production/Stable", 22 | "Intended Audience :: Developers", 23 | "Natural Language :: English", 24 | "License :: OSI Approved :: ISC License (ISCL)", 25 | "Operating System :: OS Independent", 26 | "Programming Language :: Python", 27 | "Programming Language :: Python :: 3", 28 | "Programming Language :: Python :: 3.7", 29 | "Programming Language :: Python :: 3.8", 30 | "Programming Language :: Python :: 3.9", 31 | "Programming Language :: Python :: 3.10", 32 | "Programming Language :: Python :: 3.11", 33 | "Programming Language :: Python :: 3.12", 34 | "Programming Language :: Python :: 3.13", 35 | "Programming Language :: Python :: Implementation :: CPython", 36 | "Programming Language :: Python :: Implementation :: PyPy", 37 | ] 38 | keywords = [ 39 | "validation", 40 | "schema", 41 | "dictionaries", 42 | "documents", 43 | "normalization" 44 | ] 45 | requires-python = ">=3.7" 46 | dependencies = ["importlib-metadata; python_version < '3.8'"] 47 | 48 | [project.urls] 49 | Documentation = "http://docs.python-cerberus.org" 50 | Repository = "https://github.com/pyeve/cerberus" 51 | 52 | 53 | [tool.black] 54 | safe = true 55 | skip-string-normalization = true 56 | 57 | 58 | [tool.pytest.ini_options] 59 | filterwarnings = [ 60 | "error" 61 | 
] 62 | 63 | 64 | [tool.setuptools] 65 | include-package-data = false 66 | 67 | [tool.setuptools.packages.find] 68 | include = ["cerberus"] 69 | exclude = ["*.benchmarks", "*.tests"] 70 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist=py37,py38,py39,py310,py311,py312,pypy3,doclinks,doctest,linting 3 | 4 | [testenv] 5 | deps=pytest 6 | commands=pytest cerberus/tests 7 | 8 | [testenv:doclinks] 9 | deps=-rdocs/requirements.txt 10 | allowlist_externals=make 11 | changedir=docs 12 | commands=make linkcheck 13 | 14 | [testenv:doctest] 15 | deps=PyYAML 16 | -rdocs/requirements.txt 17 | allowlist_externals=make 18 | changedir=docs 19 | commands=make doctest 20 | 21 | [testenv:linting] 22 | skipsdist=True 23 | deps=pre-commit 24 | commands=pre-commit run --config .linting-config.yaml --all-files 25 | 26 | [flake8] 27 | max-line-length=88 28 | ignore=E203,W503,W605 29 | --------------------------------------------------------------------------------