├── tests
├── __init__.py
├── requirements.txt
├── conftest.py
├── check_tag.py
├── test_settings.py
├── test_utils.py
├── test_datetime_parse.py
├── test_main.py
├── test_complex.py
└── test_types.py
├── benchmarks
├── requirements.txt
├── profile.py
├── test_pydantic.py
├── test_trafaret.py
└── run.py
├── pydantic
├── version.py
├── __init__.py
├── env_settings.py
├── exceptions.py
├── utils.py
├── validators.py
├── types.py
├── datetime_parse.py
├── main.py
└── fields.py
├── HISTORY.rst
├── .gitignore
├── docs
├── example2.py
├── example1.py
├── _templates
│ └── layout.html
├── usage_recursive.py
├── usage_choices.py
├── usage_settings.py
├── usage_config.py
├── usage_compound.py
├── Makefile
├── usage_exotic.py
├── usage_errors.py
├── index.rst
└── conf.py
├── .editorconfig
├── setup.cfg
├── README.rst
├── LICENSE
├── .travis.yml
├── Makefile
└── setup.py
/tests/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/benchmarks/requirements.txt:
--------------------------------------------------------------------------------
1 | python-dateutil # pyup: ignore
2 | trafaret # pyup: ignore
3 |
--------------------------------------------------------------------------------
/pydantic/version.py:
--------------------------------------------------------------------------------
1 | from distutils.version import StrictVersion
2 |
3 | __all__ = ['VERSION']
4 |
5 | VERSION = StrictVersion('0.1.0')
6 |
--------------------------------------------------------------------------------
/HISTORY.rst:
--------------------------------------------------------------------------------
1 | .. :changelog:
2 |
3 | History
4 | -------
5 |
6 | v0.1.0 (2017-06-03)
7 | ...................
8 | * add docs
9 | * add history
10 |
--------------------------------------------------------------------------------
/pydantic/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 | from .env_settings import BaseSettings
3 | from .exceptions import *
4 | from .main import BaseModel
5 | from .types import *
6 | from .version import VERSION
7 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea/
2 | env/
3 | *.py[cod]
4 | *.egg-info/
5 | build/
6 | dist/
7 | .cache/
8 | .mypy_cache/
9 | test.py
10 | .coverage
11 | htmlcov/
12 | benchmarks/cases.json
13 | docs/_build/
14 |
--------------------------------------------------------------------------------
/tests/requirements.txt:
--------------------------------------------------------------------------------
1 | coverage==4.4.1
2 | docutils==0.13.1
3 | flake8==3.3.0
4 | pycodestyle==2.3.1
5 | pyflakes==1.5.0
6 | pytest==3.1.1
7 | pytest-cov==2.5.1
8 | pytest-isort==0.1.0
9 | pytest-sugar==0.8.0
10 | Pygments==2.2.0
11 | Sphinx==1.6.2
12 |
--------------------------------------------------------------------------------
/docs/example2.py:
--------------------------------------------------------------------------------
1 | UserModel(signup_ts='broken')
2 | """
3 | pydantic.exceptions.ValidationError: 2 errors validating input
4 | user_id:
5 | field required (error_type=Missing)
6 | signup_ts:
7 | Invalid datetime format (error_type=ValueError track=datetime)
8 | """
9 |
--------------------------------------------------------------------------------
/benchmarks/profile.py:
--------------------------------------------------------------------------------
import json
from test_pydantic import TestPydantic

# Profiling driver: validates every benchmark case once so a profiler
# (e.g. python -m cProfile) can be pointed at this script.

with open('./benchmarks/cases.json') as f:  # path is relative: run from the repo root
    cases = json.load(f)

count, pass_count = 0, 0
test = TestPydantic(False)
for case in cases:
    passed, result = test.validate(case)
    count += 1
    pass_count += passed  # `passed` is a bool, counted as 0/1
# NOTE(review): count/pass_count are accumulated but never reported — presumably
# only the validation work itself matters when profiling; confirm intent.
13 |
--------------------------------------------------------------------------------
/.editorconfig:
--------------------------------------------------------------------------------
1 | root = true
2 |
3 | # Unix-style newlines with a newline ending every file
4 | [*]
5 | end_of_line = lf
6 | insert_final_newline = true
7 |
8 | [*.py]
9 | indent_style = space
10 | indent_size = 4
11 | line_length = 120
12 |
13 | [Makefile]
14 | indent_style = tab
15 |
16 | [*.rst]
17 | indent_style = space
18 | indent_size = 4
19 |
--------------------------------------------------------------------------------
/docs/example1.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime
2 | from pydantic import BaseModel
3 |
4 | class UserModel(BaseModel):
5 | id: int = ...
6 | name = 'John Doe'
7 | signup_ts: datetime = None
8 |
9 | external_data = {'id': '123', 'signup_ts': '2017-06-01 12:22'}
10 | user = UserModel(**external_data)
11 | print(user)
12 | # > UserModel id=123 name='John Doe' signup_ts=datetime.datetime(2017, 6, 1, 12, 22)
13 | print(user.id)
14 | # > 123
15 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [tool:pytest]
2 | testpaths = tests
3 | addopts = --isort
4 | timeout = 10
5 | filterwarnings = ignore
6 |
7 | [flake8]
8 | max-line-length = 120
9 | max-complexity = 10
10 |
11 | [bdist_wheel]
12 | python-tag = py36
13 |
14 | [coverage:run]
15 | source = pydantic
16 | branch = True
17 |
18 | [coverage:report]
19 | precision = 2
20 | exclude_lines =
21 | pragma: no cover
22 | raise NotImplementedError
23 | raise NotImplemented
24 |
25 | [isort]
26 | line_length=120
27 |
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import pytest
4 |
5 |
class SetEnv:
    """Test helper: sets environment variables and remembers them for cleanup."""

    def __init__(self):
        self.envars = set()

    def set(self, name, value):
        """Set *name* to *value* in ``os.environ`` and record it for :meth:`clear`."""
        os.environ[name] = value
        self.envars.add(name)

    def clear(self):
        """Remove every variable previously set through :meth:`set`."""
        for name in self.envars:
            os.environ.pop(name)
17 |
18 |
@pytest.fixture
def env():
    """Yield a SetEnv helper; variables it set are removed after the test.

    Uses ``pytest.fixture`` directly: yield fixtures have been supported by the
    plain decorator since pytest 3.0, and ``pytest.yield_fixture`` is deprecated.
    """
    setenv = SetEnv()

    yield setenv

    setenv.clear()
26 |
--------------------------------------------------------------------------------
/docs/_templates/layout.html:
--------------------------------------------------------------------------------
1 | {% extends '!layout.html' %}
2 |
3 | {% block footer %}
4 |
12 | {% endblock %}
13 |
--------------------------------------------------------------------------------
/tests/check_tag.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
"""Travis helper: fail the build if the git tag disagrees with the package version."""
import os
import sys

from pydantic.version import VERSION

git_tag = os.getenv('TRAVIS_TAG')
if git_tag:
    # tags are expected to look like "v0.1.0"; compare case-insensitively without the "v"
    if git_tag.lower().lstrip('v') != str(VERSION).lower():
        print(f'✖ "TRAVIS_TAG" environment variable does not match package version: "{git_tag}" vs. "{VERSION}"')
        sys.exit(1)
    else:
        # bug fix: the original mixed str.format "{}" with an un-substituted "%s"
        # placeholder, so the version was printed literally as "%s"
        print(f'✓ "TRAVIS_TAG" environment variable matches package version: "{git_tag}" vs. "{VERSION}"')
else:
    print('✓ "TRAVIS_TAG" not defined')
16 |
--------------------------------------------------------------------------------
/docs/usage_recursive.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 | from pydantic import BaseModel
3 |
4 | class Foo(BaseModel):
5 | count: int = ...
6 | size: float = None
7 |
8 | class Bar(BaseModel):
9 | apple = 'x'
10 | banana = 'y'
11 |
12 | class Spam(BaseModel):
13 | foo: Foo = ...
14 | bars: List[Bar] = ...
15 |
16 |
17 | m = Spam(foo={'count': 4}, bars=[{'apple': 'x1'}, {'apple': 'x2'}])
18 | print(m)
19 | # > Spam foo= bars=[, ]
20 | print(m.values)
21 | # {'foo': {'count': 4, 'size': None}, 'bars': [{'apple': 'x1', 'banana': 'y'}, {'apple': 'x2', 'banana': 'y'}]}
22 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | pydantic
2 | ========
3 |
4 | |BuildStatus| |Coverage| |pypi|
5 |
6 | Data validation and settings management using python 3.6 type hinting.
7 |
8 | See `documentation`_ for more details.
9 |
10 | .. |BuildStatus| image:: https://travis-ci.org/samuelcolvin/pydantic.svg?branch=master
11 | :target: https://travis-ci.org/samuelcolvin/pydantic
12 | .. |Coverage| image:: https://codecov.io/gh/samuelcolvin/pydantic/branch/master/graph/badge.svg
13 | :target: https://codecov.io/gh/samuelcolvin/pydantic
14 | .. |pypi| image:: https://img.shields.io/pypi/v/pydantic.svg
15 | :target: https://pypi.python.org/pypi/pydantic
16 | .. _documentation: https://pydantic-docs.helpmanual.io/
17 |
--------------------------------------------------------------------------------
/docs/usage_choices.py:
--------------------------------------------------------------------------------
1 | from enum import Enum, IntEnum
2 |
3 | from pydantic import BaseModel
4 |
5 |
6 | class FruitEnum(str, Enum):
7 | pear = 'pear'
8 | banana = 'banana'
9 |
10 |
11 | class ToolEnum(IntEnum):
12 | spanner = 1
13 | wrench = 2
14 |
15 |
16 | class CookingModel(BaseModel):
17 | fruit: FruitEnum = FruitEnum.pear
18 | tool: ToolEnum = ToolEnum.spanner
19 |
20 |
21 | print(CookingModel())
22 | # > CookingModel fruit= tool=
23 | print(CookingModel(tool=2, fruit='banana'))
24 | # > CookingModel fruit= tool=
25 | print(CookingModel(fruit='other'))
26 | # will raise a validation error
27 |
--------------------------------------------------------------------------------
/docs/usage_settings.py:
--------------------------------------------------------------------------------
1 | from pydantic import DSN, BaseSettings, PyObject
2 |
3 |
4 | class Settings(BaseSettings):
5 | redis_host = 'localhost'
6 | redis_port = 6379
7 | redis_database = 0
8 | redis_password: str = None
9 |
10 | auth_key: str = ...
11 |
12 | invoicing_cls: PyObject = 'path.to.Invoice'
13 |
14 | db_name = 'foobar'
15 | db_user = 'postgres'
16 | db_password: str = None
17 | db_host = 'localhost'
18 | db_port = '5432'
19 | db_driver = 'postgres'
20 | db_query: dict = None
21 | dsn: DSN = None
22 |
23 | class Config:
24 | env_prefix = 'MY_PREFIX_' # defaults to 'APP_'
25 | fields = {
26 | 'auth_key': {
27 | 'alias': 'my_api_key'
28 | }
29 | }
30 |
--------------------------------------------------------------------------------
/docs/usage_config.py:
--------------------------------------------------------------------------------
1 | from pydantic import BaseModel
2 |
3 |
4 | class UserModel(BaseModel):
5 | id: int = ...
6 |
7 | class Config:
8 | min_anystr_length = 0 # min length for str & byte types
9 | max_anystr_length = 2 ** 16 # max length for str & byte types
10 | min_number_size = -2 ** 64 # min size for numbers
11 | max_number_size = 2 ** 64 # max size for numbers
12 | raise_exception = True # whether or not to raise an exception if the data is invalid
13 | validate_all = False # whether or not to validate field defaults
14 | ignore_extra = True # whether to ignore any extra values in input data
15 | allow_extra = False # whether or not to allow (and include on the model) any extra values in input data
16 | fields = None # extra information on each field, currently just "alias is allowed"
17 |
--------------------------------------------------------------------------------
/docs/usage_compound.py:
--------------------------------------------------------------------------------
1 | from typing import Dict, List, Optional, Union, Set
2 |
3 | from pydantic import BaseModel
4 |
5 |
6 | class Model(BaseModel):
7 | simple_list: list = None
8 | list_of_ints: List[int] = None
9 |
10 | simple_dict: dict = None
11 | dict_str_float: Dict[str, float] = None
12 |
13 | simple_set: set = None
14 | set_bytes: Set[bytes] = None
15 |
16 | str_or_bytes: Union[str, bytes] = None
17 | none_or_str: Optional[str] = None
18 |
19 | compound: Dict[Union[str, bytes], List[Set[int]]] = None
20 |
21 | print(Model(simple_list=['1', '2', '3']).simple_list) # > ['1', '2', '3']
22 | print(Model(list_of_ints=['1', '2', '3']).list_of_ints) # > [1, 2, 3]
23 |
24 | print(Model(simple_dict={'a': 1, b'b': 2}).simple_dict) # > {'a': 1, b'b': 2}
25 | print(Model(dict_str_float={'a': 1, b'b': 2}).dict_str_float) # > {'a': 1.0, 'b': 2.0}
26 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # You can set these variables from the command line.
2 | SPHINXOPTS = -W
3 | SPHINXBUILD = sphinx-build
4 | PAPER =
5 | BUILDDIR = _build
6 | STATICDIR = _static
7 |
8 | # Internal variables.
9 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(SPHINXOPTS) .
10 |
11 | .PHONY: clean
12 | clean:
13 | rm -rf $(BUILDDIR)
14 |
15 | .PHONY: html
16 | html:
17 | mkdir -p $(STATICDIR)
18 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
19 | @echo
20 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
21 |
22 | .PHONY: linkcheck
23 | linkcheck:
24 | mkdir -p $(STATICDIR)
25 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
26 | @echo
27 | @echo "Link check complete; look for any errors in the above output " \
28 | "or in $(BUILDDIR)/linkcheck/output.txt."
29 |
30 | .PHONY: doctest
31 | doctest:
32 | mkdir -p $(STATICDIR)
33 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
34 | @echo "Testing of doctests in the sources finished, look at the " \
35 | "results in $(BUILDDIR)/doctest/output.txt."
36 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2017 Samuel Colvin
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/tests/test_settings.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from pydantic import BaseSettings, ValidationError
4 |
5 |
6 | class SimpleSettings(BaseSettings):
7 | apple: str = ...
8 |
9 |
10 | def test_sub_env(env):
11 | env.set('APP_APPLE', 'hello')
12 | s = SimpleSettings()
13 | assert s.apple == 'hello'
14 |
15 |
16 | def test_sub_env_override(env):
17 | env.set('APP_APPLE', 'hello')
18 | s = SimpleSettings(apple='goodbye')
19 | assert s.apple == 'goodbye'
20 |
21 |
22 | def test_sub_env_missing():
23 | with pytest.raises(ValidationError) as exc_info:
24 | SimpleSettings()
25 | assert """\
26 | 1 error validating input
27 | apple:
28 | None is not an allow value (error_type=TypeError track=str)\
29 | """ == str(exc_info.value)
30 |
31 |
32 | def test_other_setting(env):
33 | with pytest.raises(ValidationError):
34 | SimpleSettings(apple='a', foobar=42)
35 |
36 |
def test_env_with_alias(env):
    """An alias in Config.fields is used verbatim as the env var name (no APP_ prefix).

    Renamed from ``test_env_with_aliass`` (typo); test functions have no callers.
    """
    class Settings(BaseSettings):
        apple: str = ...

        class Config:
            fields = {
                'apple': 'BOOM'
            }
    env.set('BOOM', 'hello')
    assert Settings().apple == 'hello'
47 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: python
2 |
3 | cache: pip
4 |
5 | services:
6 | - postgresql
7 |
8 | python:
9 | - '3.6'
10 | - 'nightly' # currently 3.7
11 |
12 | matrix:
13 | allow_failures:
14 | - python: 'nightly'
15 |
16 | install:
17 | - make install
18 | - pip freeze
19 |
20 | script:
21 | - make lint
22 | - make test
23 | - make benchmark
24 | - make docs
25 | - ./tests/check_tag.py
26 |
27 | after_success:
28 | - ls -lha
29 | - bash <(curl -s https://codecov.io/bash)
30 |
31 | deploy:
32 | provider: pypi
33 | user: samuelcolvin
34 | password:
35 | secure: QbXFF2puEWjhFUpD0yu2R+wP4QI1IKIomBkMizsiCyMutlexERElranyYB8bsakvjPaJ+zU14ufffh2u7UA7Zhep/iE4skRHq4XWxnnRLHGu5nyGf3+zSM3F9MOzV32eZ4CDLJtFb6I0ensjTpodJH2EsIYHYxTgndIZn56Qbh6CStj7Xg1zm0Ujxdzm4ZLgcS28SOF/tpjsDW9+GXwc6L1mAZWYiS98gVgzL1vBd9tL9uFbbuFwGz9uhFMzFJko7vXSl8urWB4qeCspKXa9iKH7/AOYSwXTCwcg8U2hhC9UsOapnga2BubZKlU5HRfSs9fQcpnzcP2lwhSmkrEFa8VOw83hX6+bL564xK1Q4kanfGZ1fLU4FYge3iOnqjH7ajO7xEcUrcOEYUPfxM4EfdiDw0xnAzE1ITGH1/pZikF+wjlu+ez7RmmnejgK7quT1WU7keo7pSlRSfQtNgNl6xu818x0xZ1TScfN6e9npNy4TYyIooMOOeI4tMdfcR4JClkjGKhAtBk81DH7isZgPv3uwocGnKZ2S7La97CE3ADzU3MTA9xVIOSOjzwuvAe72uS2nwzqXkS9KATdATkC9QCvheJ9jIBB4UcqnHbD8L1gkqdmZwXZqHZldq8wcqNYZb+81lumy5EZ6xSoEzlLDpXHe80EjMUOBkb5fz3D44s=
36 | distributions: sdist bdist_wheel
37 | on:
38 | tags: true
39 | python: 3.6
40 |
--------------------------------------------------------------------------------
/pydantic/env_settings.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from .main import BaseModel
4 |
5 |
class BaseSettings(BaseModel):
    """
    Base class for settings, allowing values to be overridden by environment variables.

    Environment variables must be upper case. Eg. to override foobar, `export APP_FOOBAR="whatever"`.

    This is useful in production for secrets you do not wish to save in code, it plays nicely with docker(-compose),
    Heroku and any 12 factor app design.
    """

    def __init__(self, **values):
        # environment-derived values act as defaults: explicit kwargs win via the
        # later entry in the merged dict
        values = {
            **self._substitute_environ(),
            **values,
        }
        super().__init__(**values)

    def _substitute_environ(self):
        """
        Substitute environment variables into values.

        Returns a dict keyed by field alias, containing only fields for which a
        (truthy) environment variable was found.
        """
        d = {}
        for name, field in self.__fields__.items():
            if field.alt_alias:
                # an explicitly configured alias is used verbatim as the variable name
                env_name = field.alias
            else:
                env_name = self.config.env_prefix + field.name.upper()
            env_var = os.getenv(env_name, None)
            # NOTE(review): a falsy value (e.g. empty string) is treated as unset — confirm intended
            if env_var:
                d[field.alias] = env_var
        return d

    class Config:
        env_prefix = 'APP_'      # prepended to upper-cased field names when no alias is set
        validate_all = True      # defaults are validated too, so env values are coerced
        ignore_extra = False     # unknown env/kwarg names raise rather than being dropped
42 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: install
2 | install:
3 | pip install -U setuptools pip
4 | pip install -U .
5 | pip install -r tests/requirements.txt
6 | pip install -r benchmarks/requirements.txt
7 |
8 | .PHONY: isort
9 | isort:
10 | isort -rc -w 120 pydantic
11 | isort -rc -w 120 tests
12 |
13 | .PHONY: lint
14 | lint:
15 | python setup.py check -rms
16 | flake8 pydantic/ tests/
17 | pytest pydantic -p no:sugar -q
18 |
19 | .PHONY: test
20 | test:
21 | pytest --cov=pydantic
22 |
23 | .PHONY: testcov
24 | testcov:
25 | pytest --cov=pydantic && (echo "building coverage html"; coverage html)
26 |
27 | .PHONY: all
28 | all: testcov lint
29 |
30 | .PHONY: benchmark
31 | benchmark:
32 | python benchmarks/run.py
33 |
34 | .PHONY: clean
35 | clean:
36 | rm -rf `find . -name __pycache__`
37 | rm -f `find . -type f -name '*.py[co]' `
38 | rm -f `find . -type f -name '*~' `
39 | rm -f `find . -type f -name '.*~' `
40 | rm -rf .cache
41 | rm -rf htmlcov
42 | rm -rf *.egg-info
43 | rm -f .coverage
44 | rm -f .coverage.*
45 | rm -rf build
46 | python setup.py clean
47 | make -C docs clean
48 |
49 | .PHONY: docs
50 | docs:
51 | make -C docs clean
52 | make -C docs html
53 | @echo "open file://`pwd`/docs/_build/html/index.html"
54 |
55 | .PHONY: deploy-docs
56 | deploy-docs: docs
57 | cd docs/_build/ && cp -r html site && zip -r site.zip site
58 | @curl -H "Content-Type: application/zip" -H "Authorization: Bearer ${NETLIFY}" \
59 | --data-binary "@docs/_build/site.zip" https://api.netlify.com/api/v1/sites/pydantic-docs.netlify.com/deploys
60 |
--------------------------------------------------------------------------------
/benchmarks/test_pydantic.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime
2 | from typing import List
3 |
4 | from pydantic import BaseModel, constr, EmailStr
5 |
6 |
class TestPydantic:
    """Benchmark harness entry for pydantic: the model class is built once, then reused."""
    package = 'pydantic'

    def __init__(self, allow_extra):

        class Model(BaseModel):
            id: int = ...
            client_name: constr(max_length=255) = ...
            sort_index: float = ...
            client_email: EmailStr = None
            client_phone: constr(max_length=255) = None

            class Location(BaseModel):
                latitude: float = None
                longitude: float = None
            location: Location = None

            contractor: int = None
            upstream_http_referrer: constr(max_length=1023) = None
            grecaptcha_response: constr(min_length=20, max_length=1000) = ...
            last_updated: datetime = None

            class Skill(BaseModel):
                subject: str = ...
                subject_id: int = ...
                category: str = ...
                qual_level: str = ...
                qual_level_id: int = ...
                qual_level_ranking: float = 0
            skills: List[Skill] = []

            class Config:
                # NOTE(review): the benchmark's `allow_extra` flag is assigned to
                # pydantic's `ignore_extra` setting — confirm the two semantics
                # line up with TestTrafaret's allow_extra('*') behaviour
                ignore_extra = allow_extra

        self.model = Model

    def validate(self, data):
        """Return (ok, model_or_None) for one benchmark case dict."""
        try:
            # pydantic signals validation failure via ValueError subclasses
            return True, self.model(**data)
        except ValueError:
            return False, None
48 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from importlib.machinery import SourceFileLoader
2 | from pathlib import Path
3 | from setuptools import setup
4 |
5 | THIS_DIR = Path(__file__).resolve().parent
6 | long_description = THIS_DIR.joinpath('README.rst').read_text()
7 |
8 | # avoid loading the package before requirements are installed:
9 | version = SourceFileLoader('version', 'pydantic/version.py').load_module()
10 |
11 | setup(
12 | name='pydantic',
13 | version=str(version.VERSION),
14 | description='Data validation and settings management using python 3.6 type hinting',
15 | long_description=long_description,
16 | classifiers=[
17 | 'Development Status :: 4 - Beta',
18 | 'Environment :: Console',
19 | 'Programming Language :: Python',
20 | 'Programming Language :: Python :: 3',
21 | 'Programming Language :: Python :: 3 :: Only',
22 | 'Programming Language :: Python :: 3.6',
23 | 'Intended Audience :: Developers',
24 | 'Intended Audience :: Information Technology',
25 | 'Intended Audience :: System Administrators',
26 | 'License :: OSI Approved :: MIT License',
27 | 'Operating System :: Unix',
28 | 'Operating System :: POSIX :: Linux',
29 | 'Environment :: MacOS X',
30 | 'Topic :: Software Development :: Libraries :: Python Modules',
31 | 'Topic :: Internet',
32 | ],
33 | author='Samuel Colvin',
34 | author_email='s@muelcolvin.com',
35 | url='https://github.com/samuelcolvin/pydantic',
36 | license='MIT',
37 | packages=['pydantic'],
38 | python_requires='>=3.6',
39 | zip_safe=True,
40 | )
41 |
--------------------------------------------------------------------------------
/docs/usage_exotic.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | from pydantic import DSN, BaseModel, EmailStr, NameEmail, PyObject, conint, constr, PositiveInt, NegativeInt
4 |
5 |
6 | class Model(BaseModel):
7 | cos_function: PyObject = None
8 | path_to_something: Path = None
9 |
10 | short_str: constr(min_length=2, max_length=10) = None
11 | regex_str: constr(regex='apple (pie|tart|sandwich)') = None
12 |
13 | big_int: conint(gt=1000, lt=1024) = None
14 | pos_int: PositiveInt = None
15 | neg_int: NegativeInt = None
16 |
17 | email_address: EmailStr = None
18 | email_and_name: NameEmail = None
19 |
20 | db_name = 'foobar'
21 | db_user = 'postgres'
22 | db_password: str = None
23 | db_host = 'localhost'
24 | db_port = '5432'
25 | db_driver = 'postgres'
26 | db_query: dict = None
27 | dsn: DSN = None
28 |
29 | m = Model(
30 | cos_function='math.cos',
31 | path_to_something='/home',
32 | short_str='foo',
33 | regex_str='apple pie',
34 | big_int=1001,
35 | pos_int=1,
36 | neg_int=-1,
37 | email_address='Samuel Colvin <s@muelcolvin.com>',
38 | email_and_name='Samuel Colvin <s@muelcolvin.com>',
39 | )
40 | print(m.values)
41 | """
42 | {
43 | 'cos_function': ,
44 | 'path_to_something': PosixPath('/home'),
45 | 'short_str': 'foo', 'regex_str': 'apple pie',
46 | 'big_int': 1001,
47 | 'pos_int': 1,
48 | 'neg_int': -1,
49 | 'email_address': 's@muelcolvin.com',
50 | 'email_and_name': ")>,
51 | ...
52 | 'dsn': 'postgres://postgres@localhost:5432/foobar'
53 | }
54 | """
55 |
--------------------------------------------------------------------------------
/docs/usage_errors.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 |
3 | from pydantic import BaseModel, ValidationError
4 |
5 |
6 | class Location(BaseModel):
7 | lat = 0.1
8 | lng = 10.1
9 |
10 | class Model(BaseModel):
11 | list_of_ints: List[int] = None
12 | a_float: float = None
13 | is_required: float = ...
14 | recursive_model: Location = None
15 |
16 | try:
17 | Model(list_of_ints=['1', 2, 'bad'], a_float='not a float', recursive_model={'lat': 4.2, 'lng': 'New York'})
18 | except ValidationError as e:
19 | print(e)
20 |
21 | """
22 | 4 errors validating input
23 | list_of_ints:
24 | invalid literal for int() with base 10: 'bad' (error_type=ValueError track=int index=2)
25 | a_float:
26 | could not convert string to float: 'not a float' (error_type=ValueError track=float)
27 | is_required:
28 | field required (error_type=Missing)
29 | recursive_model:
30 | 1 error validating input (error_type=ValidationError track=Location)
31 | lng:
32 | could not convert string to float: 'New York' (error_type=ValueError track=float
33 | """
34 |
35 | try:
36 | Model(list_of_ints=1, a_float=None, recursive_model=[1, 2, 3])
37 | except ValidationError as e:
38 | print(e.json())
39 |
40 | """
41 | {
42 | "is_required": {
43 | "error_msg": "field required",
44 | "error_type": "Missing",
45 | "index": null,
46 | "track": null
47 | },
48 | "list_of_ints": {
49 | "error_msg": "'int' object is not iterable",
50 | "error_type": "TypeError",
51 | "index": null,
52 | "track": null
53 | },
54 | "recursive_model": {
55 | "error_msg": "cannot convert dictionary update sequence element #0 to a sequence",
56 | "error_type": "TypeError",
57 | "index": null,
58 | "track": "Location"
59 | }
60 | }
61 | """
62 |
--------------------------------------------------------------------------------
/benchmarks/test_trafaret.py:
--------------------------------------------------------------------------------
1 | from dateutil.parser import parse
2 | import trafaret as t
3 |
4 |
class TestTrafaret:
    """Benchmark harness entry for trafaret: the schema is built once, then reused."""
    package = 'trafaret'

    def __init__(self, allow_extra):
        self.schema = t.Dict({
            'id': t.Int(),
            'client_name': t.String(max_length=255),
            'sort_index': t.Float,
            t.Key('client_email', optional=True): t.Or(t.Null | t.Email()),
            t.Key('client_phone', optional=True): t.Or(t.Null | t.String(max_length=255)),

            t.Key('location', optional=True): t.Or(t.Null | t.Dict({
                'latitude': t.Or(t.Float | t.Null),
                'longitude': t.Or(t.Float | t.Null),
            })),

            t.Key('contractor', optional=True): t.Or(t.Null | t.Int(gt=0)),
            t.Key('upstream_http_referrer', optional=True): t.Or(t.Null | t.String(max_length=1023)),
            t.Key('grecaptcha_response'): t.String(min_length=20, max_length=1000),

            # `>> parse` pipes the validated string through dateutil's parser
            t.Key('last_updated', optional=True): t.Or(t.Null | t.String >> parse),

            t.Key('skills', default=[]): t.List(t.Dict({
                'subject': t.String,
                'subject_id': t.Int,
                'category': t.String,
                'qual_level': t.String,
                'qual_level_id': t.Int,
                t.Key('qual_level_ranking', default=0): t.Float,
            })),
        })
        if allow_extra:
            self.schema.allow_extra('*')

    def validate(self, data):
        """Return (ok, checked_data_or_None) for one benchmark case dict."""
        try:
            return True, self.schema.check(data)
        except t.DataError:
            return False, None
        except ValueError:
            # presumably raised by dateutil's parse on malformed datetimes — confirm
            return False, None
46 |
--------------------------------------------------------------------------------
/pydantic/exceptions.py:
--------------------------------------------------------------------------------
1 | import json
2 | from collections import OrderedDict, namedtuple
3 | from itertools import chain
4 |
5 |
def type_display(type_: type):
    """Return a readable name for *type_*; ``None`` when *type_* is falsy."""
    if not type_:
        return None
    try:
        return type_.__name__
    except AttributeError:
        # typing constructs such as unions have no __name__
        return str(type_)
13 |
14 |
15 | Error = namedtuple('Error', ['exc', 'track', 'index'])
16 |
17 |
def pretty_errors(e):
    """Recursively convert raw ``Error`` records into plain dicts and lists."""
    if isinstance(e, Error):
        info = {
            'error_type': e.exc.__class__.__name__,
            'track': type_display(e.track),
            'index': e.index,
        }
        if isinstance(e.exc, ValidationError):
            # nested model failure: keep both the summary and the sub-errors
            info['error_msg'] = e.exc.message
            info['error_details'] = e.exc.errors_dict
        else:
            info['error_msg'] = str(e.exc)
        return info
    if isinstance(e, dict):
        return OrderedDict((key, pretty_errors(value)) for key, value in e.items())
    return [pretty_errors(item) for item in e]
37 |
38 |
39 | E_KEYS = 'error_type', 'track', 'index'
40 |
41 |
def _render_errors(e, indent=0):
    """Flatten a pretty-errors structure into ``(indent_level, line)`` pairs."""
    if isinstance(e, list):
        rendered = []
        for item in e:
            rendered.extend(_render_errors(item, indent))
        return rendered
    if isinstance(e, OrderedDict):
        rendered = []
        for key, value in e.items():
            rendered.append((indent, key + ':'))
            rendered.extend(_render_errors(value, indent=indent + 1))
        return rendered
    # leaf: a plain dict produced by pretty_errors for a single Error
    detail = ' '.join(f'{k}={e.get(k)}' for k in E_KEYS if e.get(k))
    rendered = [(indent, f'{e["error_msg"]} ({detail})')]
    nested = e.get('error_details')
    if nested:
        rendered.extend(_render_errors(nested, indent=indent + 1))
    return rendered
58 |
59 |
class ValidationError(ValueError):
    """Aggregates one or more field validation failures into a single exception."""

    def __init__(self, errors):
        self.errors_raw = errors
        count = len(errors)
        plural = '' if count == 1 else 's'
        self.message = f'{count} error{plural} validating input'
        super().__init__(self.message)

    def json(self, indent=2):
        """Render the error tree as pretty-printed, key-sorted JSON."""
        return json.dumps(self.errors_dict, indent=indent, sort_keys=True)

    @property
    def errors_dict(self):
        # computed lazily so raw errors can reference not-yet-rendered children
        return pretty_errors(self.errors_raw)

    @property
    def display_errors(self):
        return '\n'.join(' ' * level + text for level, text in _render_errors(self.errors_dict))

    def __str__(self):
        return f'{self.message}\n{self.display_errors}'
80 |
81 |
class ConfigError(RuntimeError):
    """Raised for invalid model configuration, as opposed to bad input data."""
    pass


class Missing(ValueError):
    """Marker exception: a required field was absent from the input."""
    pass


class Extra(ValueError):
    """Marker exception: unexpected extra input — presumably raised when
    ignore_extra is off; confirm against fields/main."""
    pass
92 |
--------------------------------------------------------------------------------
/tests/test_utils.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import pytest
4 |
5 | from pydantic.utils import import_string, make_dsn, validate_email
6 |
7 |
# bug fix: the angle-bracketed "<...>" parts of the pretty-address cases had been
# stripped (extraction artifact); restored from the expected name/email columns.
@pytest.mark.parametrize('value,name,email', [
    ('foobar@example.com', 'foobar', 'foobar@example.com'),
    ('s@muelcolvin.com', 's', 's@muelcolvin.com'),
    ('Samuel Colvin <s@muelcolvin.com>', 'Samuel Colvin', 's@muelcolvin.com'),
    ('foobar <foobar@example.com>', 'foobar', 'foobar@example.com'),
    (' foo.bar@example.com', 'foo.bar', 'foo.bar@example.com'),
    ('foo.bar@example.com ', 'foo.bar', 'foo.bar@example.com'),
    ('foo BAR <foobar@example.com >', 'foo BAR', 'foobar@example.com'),
    ('FOO bar<foobar@example.com> ', 'FOO bar', 'foobar@example.com'),
    ('<FOOBAR@example.com> ', 'FOOBAR', 'foobar@example.com'),

    ('ñoñó@example.com', 'ñoñó', 'ñoñó@example.com'),
    ('我買@example.com', '我買', '我買@example.com'),
    ('甲斐黒川日本@example.com', '甲斐黒川日本', '甲斐黒川日本@example.com'),
    ('чебурашкаящик-с-апельсинами.рф@example.com',
     'чебурашкаящик-с-апельсинами.рф',
     'чебурашкаящик-с-апельсинами.рф@example.com'),
    ('उदाहरण.परीक्ष@domain.with.idn.tld', 'उदाहरण.परीक्ष', 'उदाहरण.परीक्ष@domain.with.idn.tld'),
    ('foo.bar@example.com', 'foo.bar', 'foo.bar@example.com'),
    ('foo.bar@exam-ple.com ', 'foo.bar', 'foo.bar@exam-ple.com'),
])
def test_address_valid(value, name, email):
    """validate_email must return the (display name, normalised address) pair."""
    assert validate_email(value) == (name, email)
31 |
32 |
@pytest.mark.parametrize('value', [
    'f oo.bar@example.com ',  # space inside the local part
    'foo.bar@exam\nple.com ',  # newline inside the domain
    'foobar',  # no @ at all
    'foobar ',
    # NOTE(review): this list appears truncated (original cases 38-55 are missing
    # from this copy) — verify against the upstream file
])
def test_address_invalid(value):
    """Each malformed address must be rejected with ValueError."""
    with pytest.raises(ValueError):
        validate_email(value)
60 |
61 |
def test_empty_dsn():
    """A driver alone yields just the scheme prefix — no '@', host or path."""
    dsn = make_dsn(driver='foobar')
    assert dsn == 'foobar://'
64 |
65 |
def test_dsn_odd_user():
    """An '@' inside the user name must be percent-quoted in the DSN."""
    dsn = make_dsn(driver='foobar', user='foo@bar')
    assert dsn == 'foobar://foo%40bar@'
68 |
69 |
def test_import_module():
    """A dotted path resolves to the named module attribute."""
    imported = import_string('os.path')
    assert imported == os.path
72 |
73 |
def test_import_module_invalid():
    """A path without a dot cannot be split into module + attribute."""
    with pytest.raises(ImportError) as exc_info:
        import_string('xx')
    assert str(exc_info.value) == '"xx" doesn\'t look like a module path'
78 |
79 |
def test_import_no_attr():
    """A valid module with a missing attribute raises a descriptive ImportError."""
    with pytest.raises(ImportError) as exc_info:
        import_string('os.foobar')
    assert str(exc_info.value) == 'Module "os" does not define a "foobar" attribute'
84 |
--------------------------------------------------------------------------------
/pydantic/utils.py:
--------------------------------------------------------------------------------
1 | import re
2 | from importlib import import_module
3 | from typing import Tuple
4 |
PRETTY_REGEX = re.compile(r'([\w ]*?) *<(.*)> *')

# max length for domain name labels is 63 characters per RFC 1034
EMAIL_REGEX = re.compile(
    r'[^\s@\u0000-\u0020"\'`,]+@'
    r'(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z0-9]{2,63})',
    re.I
)


def validate_email(value) -> Tuple[str, str]:
    """
    Brutally simple email address validation. Note unlike most email address validation
    * raw ip address (literal) domain parts are not allowed.
    * "John Doe <local@domain.com>" style "pretty" email addresses are processed
    * the local part check is extremely basic; this raises the possibility of unicode spoofing, but no better
      solution is really possible.
    * spaces are stripped from the beginning and end of addresses, but no error is raised

    See RFC 5322 but treat it with suspicion, there seems to exist no universally acknowledged test for a valid email!
    """
    name = None
    pretty = PRETTY_REGEX.fullmatch(value)
    if pretty:
        name, value = pretty.groups()

    email = value.strip()
    if EMAIL_REGEX.fullmatch(email) is None:
        raise ValueError('Email address is not valid')
    if not name:
        # no display name: fall back to the (original-case) local part
        name = email[:email.index('@')]
    return name, email.lower()
36 |
37 |
38 | def _rfc_1738_quote(text):
39 | return re.sub(r'[:@/]', lambda m: '%{:X}'.format(ord(m.group(0))), text)
40 |
41 |
42 | def make_dsn(*,
43 | driver: str,
44 | user: str=None,
45 | password: str=None,
46 | host: str=None,
47 | port: str=None,
48 | name: str=None,
49 | query: str=None):
50 | """
51 | Create a DSN from from connection settings.
52 |
53 | Stolen approximately from sqlalchemy/engine/url.py:URL.
54 | """
55 | s = driver + '://'
56 | if user is not None:
57 | s += _rfc_1738_quote(user)
58 | if password is not None:
59 | s += ':' + _rfc_1738_quote(password)
60 | s += '@'
61 | if host is not None:
62 | if ':' in host:
63 | s += '[{}]'.format(host)
64 | else:
65 | s += host
66 | if port is not None:
67 | s += ':{}'.format(int(port))
68 | if name is not None:
69 | s += '/' + name
70 | query = query or {}
71 | if query:
72 | keys = list(query)
73 | keys.sort()
74 | s += '?' + '&'.join('{}={}'.format(k, query[k]) for k in keys)
75 | return s
76 |
77 |
def import_string(dotted_path):
    """
    Resolve a dotted path like 'pkg.mod.attr' to the object it names.

    Approximately what django does; raises ImportError on any failure.
    """
    try:
        module_name, attr = dotted_path.strip(' ').rsplit('.', 1)
    except ValueError as exc:
        raise ImportError(f'"{dotted_path}" doesn\'t look like a module path') from exc

    module = import_module(module_name)
    try:
        return getattr(module, attr)
    except AttributeError as exc:
        raise ImportError(f'Module "{module_name}" does not define a "{attr}" attribute') from exc
93 |
--------------------------------------------------------------------------------
/pydantic/validators.py:
--------------------------------------------------------------------------------
1 | from collections import OrderedDict
2 | from datetime import date, datetime, time, timedelta
3 | from enum import Enum
4 | from pathlib import Path
5 | from typing import Any
6 |
7 | from .datetime_parse import parse_date, parse_datetime, parse_duration, parse_time
8 | from .exceptions import ConfigError
9 |
NoneType = type(None)


def not_none_validator(v):
    """Reject None; any other value passes through unchanged."""
    if v is None:
        # bug fix: message previously read 'None is not an allow value'
        raise TypeError('None is not an allowed value')
    return v


def str_validator(v) -> str:
    """Coerce to str: str/None pass through, bytes are decoded, anything else is str()'d."""
    if isinstance(v, (str, NoneType)):
        return v
    elif isinstance(v, bytes):
        return v.decode()
    return str(v)


def bytes_validator(v) -> bytes:
    """Coerce to bytes: bytes/None pass through, anything else goes via str_validator then encode."""
    if isinstance(v, (bytes, NoneType)):
        return v
    return str_validator(v).encode()
31 |
32 |
# string values (upper-cased before comparison) that count as True
BOOL_STRINGS = {
    '1',
    'TRUE',
    'ON',
    'YES',
}


def bool_validator(v) -> bool:
    """Interpret strings/bytes case-insensitively as booleans; fall back to bool()."""
    if isinstance(v, bool):
        return v
    if isinstance(v, bytes):
        v = v.decode()
    if not isinstance(v, str):
        return bool(v)
    return v.upper() in BOOL_STRINGS
49 |
50 |
def number_size_validator(v, model, **kwargs):
    """Check v lies within the model's configured min/max number bounds (inclusive)."""
    config = model.config
    if not (config.min_number_size <= v <= config.max_number_size):
        raise ValueError(f'size not in range {config.min_number_size} to {config.max_number_size}')
    return v
55 |
56 |
def anystr_length_validator(v, model, **kwargs):
    """Check len(v) against the model's configured anystr bounds; None passes through."""
    config = model.config
    if v is None or config.min_anystr_length <= len(v) <= config.max_anystr_length:
        return v
    # bug fix: the lower bound used to be reported as max_anystr_length,
    # producing messages like 'length not in range 100 to 100'
    raise ValueError(f'length not in range {config.min_anystr_length} to {config.max_anystr_length}')
61 |
62 |
def ordered_dict_validator(v) -> OrderedDict:
    """Return v unchanged if already an OrderedDict, otherwise build one from it."""
    return v if isinstance(v, OrderedDict) else OrderedDict(v)
67 |
68 |
def dict_validator(v) -> dict:
    """Return v unchanged if already a dict, otherwise build one from it."""
    return v if isinstance(v, dict) else dict(v)
73 |
74 |
def list_validator(v) -> list:
    """Return v unchanged if already a list, otherwise build one from it."""
    return v if isinstance(v, list) else list(v)
79 |
80 |
def tuple_validator(v) -> tuple:
    """Return v unchanged if already a tuple, otherwise build one from it."""
    return v if isinstance(v, tuple) else tuple(v)
85 |
86 |
def set_validator(v) -> set:
    """Return v unchanged if already a set, otherwise build one from it."""
    return v if isinstance(v, set) else set(v)
91 |
92 |
def enum_validator(v, field, **kwargs) -> Enum:
    """Coerce v into the field's Enum type (the Enum constructor rejects bad values)."""
    enum_cls = field.type_
    return enum_cls(v)
95 |
96 |
# order is important here, for example: bool is a subclass of int so has to come first, datetime before date same
_VALIDATORS = [
    (Enum, [enum_validator]),

    # str/bytes chains: reject None first, coerce, then length-check
    (str, [not_none_validator, str_validator, anystr_length_validator]),
    (bytes, [not_none_validator, bytes_validator, anystr_length_validator]),

    (bool, [bool_validator]),
    (int, [int, number_size_validator]),
    (float, [float, number_size_validator]),

    (Path, [Path]),

    (datetime, [parse_datetime]),
    (date, [parse_date]),
    (time, [parse_time]),
    (timedelta, [parse_duration]),

    (OrderedDict, [ordered_dict_validator]),
    (dict, [dict_validator]),
    (list, [list_validator]),
    (tuple, [tuple_validator]),
    (set, [set_validator]),
]
121 |
122 |
def find_validators(type_):
    """Return the validator chain for type_ (first _VALIDATORS entry whose base matches).

    Any is unvalidated; an unknown type is a configuration error.
    """
    if type_ is Any:
        return []
    for base, validators in _VALIDATORS:
        if issubclass(type_, base):
            return validators
    raise ConfigError(f'no validator found for {type_}')
130 |
--------------------------------------------------------------------------------
/benchmarks/run.py:
--------------------------------------------------------------------------------
1 | import json
2 | import random
3 | import string
4 | from datetime import datetime
5 | from functools import partial
6 | from pathlib import Path
7 | from statistics import mean, stdev
8 |
9 | from test_trafaret import TestTrafaret
10 | from test_pydantic import TestPydantic
11 |
PUNCTUATION = ' \t\n!"#$%&\'()*+,-./'
LETTERS = string.ascii_letters
UNICODE = '\xa0\xad¡¢£¤¥¦§¨©ª«¬ ®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖרÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿ'
# weighted sampling corpus: mostly letters, some punctuation, a little latin-1
ALL = PUNCTUATION * 5 + LETTERS * 20 + UNICODE
random = random.SystemRandom()  # NOTE: deliberately shadows the random module with a SystemRandom instance
17 |
18 |
class GenerateData:
    """Placeholder; nothing in this module instantiates it — presumably left over, TODO remove?"""
    def __init__(self):
        pass
22 |
23 |
def rand_string(min_length, max_length, corpus=ALL):
    """Random string drawn from corpus, with length in [min_length, max_length)."""
    length = random.randrange(min_length, max_length)
    return ''.join(random.choices(corpus, k=length))
26 |
27 |
MISSING = object()


def null_missing_v(f, null_chance=0.2, missing_chance=None):
    """Return None, MISSING, or f() at random.

    A single uniform draw r decides the outcome:
      r < null_chance                  -> None
      r < null_chance + missing_chance -> MISSING (missing_chance defaults to null_chance)
      otherwise                        -> f()
    """
    r = random.random()
    # bug fix: the None branch previously used a second, independent
    # random.random() draw, which inflated the effective MISSING probability
    if r < null_chance:
        return None
    missing_chance = null_chance if missing_chance is None else missing_chance
    if r < (null_chance + missing_chance):
        return MISSING
    return f()
39 |
40 |
def null_missing_string(*args, **kwargs):
    """rand_string wrapper that may instead yield None or MISSING."""
    return null_missing_v(partial(rand_string, *args), **kwargs)
44 |
45 |
def rand_email():
    """Random email; ~20% of the time the local part is drawn from the unicode corpus."""
    local_corpus = UNICODE if random.random() < 0.2 else LETTERS
    local = rand_string(10, 50, corpus=local_corpus)
    domain = rand_string(10, 50, corpus=LETTERS)
    tld = rand_string(2, 5, corpus=LETTERS)
    return f'{local}@{domain}.{tld}'
52 |
53 |
def null_missing_email():
    """rand_email wrapper that may instead yield None or MISSING (default chances)."""
    return null_missing_v(rand_email)
56 |
57 |
def rand_date():
    """Random ISO-ish datetime string.

    NOTE(review): month/day start at 0, so some generated dates are invalid —
    presumably intentional so the benchmark also exercises parse failures; confirm.
    """
    r = random.randrange
    return f'{r(1900, 2020)}-{r(0, 12)}-{r(0, 32)}T{r(0, 24)}:{r(0, 60)}:{r(0, 60)}'
61 |
62 |
def remove_missing(d):
    """Recursively drop MISSING-valued keys from dicts; lists are recursed into, scalars returned as-is."""
    if isinstance(d, dict):
        return {key: remove_missing(val) for key, val in d.items() if val is not MISSING}
    if isinstance(d, list):
        return [remove_missing(item) for item in d]
    return d
70 |
71 |
def generate_case():
    """Build one random skills-record dict; MISSING-valued keys are stripped out."""
    return remove_missing(dict(
        id=random.randrange(1, 2000),
        client_name=null_missing_string(10, 280, null_chance=0.05, missing_chance=0.05),
        sort_index=random.random() * 200,
        client_email=null_missing_email(), # email checks differ with different frameworks
        client_phone=null_missing_string(5, 15),
        location=dict(
            latitude=random.random() * 180 - 90,
            longitude=random.random() * 180,
        ),
        contractor=str(random.randrange(50, 2000)),
        upstream_http_referrer=null_missing_string(10, 1050),
        grecaptcha_response=null_missing_string(10, 1050, null_chance=0.05, missing_chance=0.05),
        last_updated=rand_date(),
        skills=[dict(
            subject=null_missing_string(5, 20, null_chance=0.01, missing_chance=0),
            subject_id=i,
            category=rand_string(5, 20),
            qual_level=rand_string(5, 20),
            qual_level_id=random.randrange(2000),
            qual_level_ranking=random.random() * 20
        ) for i in range(random.randrange(1, 5))]
    ))
96 |
THIS_DIR = Path(__file__).parent.resolve()


def main():
    """Generate (or load cached) test cases, then benchmark each framework 5 times."""
    json_path = THIS_DIR / 'cases.json'
    if json_path.exists():
        with json_path.open() as f:
            cases = json.load(f)
    else:
        print('generating test cases...')
        cases = [generate_case() for _ in range(2000)]
        with json_path.open('w') as f:
            json.dump(cases, f, indent=2, sort_keys=True)
    tests = [TestTrafaret, TestPydantic]
    for test_class in tests:
        times = []
        package = test_class.package
        for _ in range(5):
            count, pass_count = 0, 0
            # instantiation is deliberately included in the timed section
            start = datetime.now()
            test = test_class(False)
            for _ in range(3):  # 3 passes over all cases per timed run
                for case in cases:
                    passed, _result = test.validate(case)
                    count += 1
                    pass_count += passed
            elapsed = (datetime.now() - start).total_seconds()
            success = pass_count / count * 100
            print(f'{package:10} time={elapsed:0.3f}s, success={success:0.2f}%')
            times.append(elapsed)
        print(f'{package:10} best={min(times):0.3f}s, avg={mean(times):0.3f}s, stdev={stdev(times):0.3f}s')


if __name__ == '__main__':
    main()
132 |
--------------------------------------------------------------------------------
/pydantic/types.py:
--------------------------------------------------------------------------------
1 | import re
2 | from typing import Optional, Type, Union
3 |
4 | from .utils import import_string, make_dsn, validate_email
5 | from .validators import str_validator
6 |
__all__ = [
    'NoneStr',
    'NoneBytes',
    'StrBytes',
    'NoneStrBytes',
    'ConstrainedStr',
    'constr',
    'EmailStr',
    'NameEmail',
    'PyObject',
    'DSN',
    'ConstrainedInt',
    'conint',
    'PositiveInt',
    'NegativeInt',
]

# convenience aliases for common optional / str-or-bytes annotations
NoneStr = Optional[str]
NoneBytes = Optional[bytes]
StrBytes = Union[str, bytes]
NoneStrBytes = Optional[StrBytes]
28 |
29 |
class ConstrainedStr(str):
    """str field type validated against class-level length/regex constraints."""
    min_length: int = None
    max_length: int = None
    curtail_length: int = None  # if set, over-long values are truncated instead of rejected
    regex = None

    @classmethod
    def get_validators(cls):
        yield str_validator
        yield cls.validate

    @classmethod
    def validate(cls, value: str) -> str:
        length = len(value)
        if cls.min_length is not None and length < cls.min_length:
            raise ValueError(f'length less than minimum allowed: {cls.min_length}')

        if cls.curtail_length:
            # curtailment takes precedence over the max_length check
            if length > cls.curtail_length:
                value = value[:cls.curtail_length]
        elif cls.max_length is not None and length > cls.max_length:
            raise ValueError(f'length greater than maximum allowed: {cls.max_length}')

        if cls.regex and not cls.regex.match(value):
            raise ValueError(f'string does not match regex "{cls.regex.pattern}"')
        return value
57 |
58 |
class EmailStr(str):
    """str field type whose value must be a valid email address; only the normalised address is kept."""

    @classmethod
    def get_validators(cls):
        yield str_validator
        yield cls.validate

    @classmethod
    def validate(cls, value):
        # validate_email returns (name, email); discard the display name
        _name, email = validate_email(value)
        return email
68 |
69 |
class NameEmail:
    """A parsed "pretty" email address: a display name plus the address itself."""
    __slots__ = 'name', 'email'

    def __init__(self, name, email):
        self.name = name
        self.email = email

    @classmethod
    def get_validators(cls):
        yield str_validator
        yield cls.validate

    @classmethod
    def validate(cls, value):
        return cls(*validate_email(value))

    def __str__(self):
        return f'{self.name} <{self.email}>'

    def __repr__(self):
        # bug fix: __repr__ previously returned an empty string
        return f'<NameEmail("{self}")>'
91 |
92 |
def constr(*, min_length=0, max_length=2**16, curtail_length=None, regex=None) -> Type[str]:
    """Build a ConstrainedStr subclass with the given constraints.

    Keyword arguments are spelled out (rather than **kwargs) to aid IDE type hinting.
    """
    attributes = {
        'min_length': min_length,
        'max_length': max_length,
        'curtail_length': curtail_length,
        'regex': regex and re.compile(regex),
    }
    return type('ConstrainedStrValue', (ConstrainedStr,), attributes)
102 |
103 |
class PyObject:
    """Field type that imports the python object named by a dotted-path string."""
    validate_always = True

    @classmethod
    def get_validators(cls):
        yield str_validator
        yield cls.validate

    @classmethod
    def validate(cls, value):
        try:
            return import_string(value)
        except ImportError as e:
            # validators may only raise TypeError or ValueError
            raise ValueError(str(e)) from e
119 |
120 |
class DSN(str):
    """str field which, when empty, is assembled from the model's db_*-prefixed settings."""
    prefix = 'db_'
    validate_always = True

    @classmethod
    def get_validators(cls):
        yield str_validator
        yield cls.validate

    @classmethod
    def validate(cls, value, model, **kwarg):
        if value:
            # an explicitly supplied DSN wins
            return value
        values = model.__values__
        parts = ('driver', 'user', 'password', 'host', 'port', 'name', 'query')
        kwargs = {part: values.get(cls.prefix + part) for part in parts}
        if kwargs['driver'] is None:
            raise ValueError(f'"{cls.prefix}driver" field may not be missing or None')
        return make_dsn(**kwargs)
139 |
140 |
class ConstrainedInt(int):
    """int field type validated against class-level exclusive gt/lt bounds."""
    gt: int = None
    lt: int = None

    @classmethod
    def get_validators(cls):
        yield int
        yield cls.validate

    @classmethod
    def validate(cls, value: int) -> int:
        if cls.gt is not None and value <= cls.gt:
            raise ValueError(f'size less than minimum allowed: {cls.gt}')
        if cls.lt is not None and value >= cls.lt:
            raise ValueError(f'size greater than maximum allowed: {cls.lt}')
        return value
157 |
158 |
def conint(*, gt=None, lt=None) -> Type[int]:
    """Build a ConstrainedInt subclass with the given exclusive bounds.

    Keyword arguments are spelled out (rather than **kwargs) to aid IDE type hinting.
    """
    return type('ConstrainedIntValue', (ConstrainedInt,), {'gt': gt, 'lt': lt})
163 |
164 |
class PositiveInt(ConstrainedInt):
    """An int required to be strictly greater than zero."""
    gt = 0
167 |
168 |
class NegativeInt(ConstrainedInt):
    """An int required to be strictly less than zero."""
    lt = 0
171 |
172 |
173 | # TODO, JsonEither, JsonList, JsonDict
174 |
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | pydantic
2 | ========
3 |
4 | .. toctree::
5 | :maxdepth: 2
6 |
7 | |pypi| |license|
8 |
9 | Current Version: |version|
10 |
11 | Data validation and settings management using python 3.6 type hinting.
12 |
13 | Define how data should be in pure canonical python, validate it with *pydantic*.
14 |
`PEP 484 <https://www.python.org/dev/peps/pep-0484/>`_ introduced type hinting into python in 3.5 and
`PEP 526 <https://www.python.org/dev/peps/pep-0526/>`_ extended that with syntax for variable annotation in 3.6.
*pydantic* uses those annotations to validate that untrusted data takes the form you want.
18 |
19 | Simple example:
20 |
21 | .. literalinclude:: example1.py
22 |
23 | (This script is complete, it should run "as is")
24 |
25 | What's going on here:
26 |
* ``id`` is of type int, the ellipsis tells pydantic this field is required. Strings, bytes or floats will be
  converted to ints if possible, otherwise an exception would be raised.
29 | * ``name`` pydantic infers is a string from the default, it is not required as it has a default
30 | * ``signup_ts`` is a datetime field which is not required (``None`` if it's not supplied), pydantic will process
31 | either a unix timestamp int (eg. ``1496498400``) or a string representing the date & time.
32 |
If validation fails pydantic will raise an error with a breakdown of what was wrong:
34 |
35 | .. literalinclude:: example2.py
36 |
37 | Rationale
38 | ---------
39 |
So *pydantic* uses some cool new language features, but why should I actually go and use it?
41 |
42 | **no brainfuck**
no new schema definition micro-language to learn. If you know python (and perhaps skim read the
`type hinting docs <https://docs.python.org/3/library/typing.html>`_) you know how to use pydantic.
45 |
46 | **plays nicely with your IDE/linter/brain**
47 | because pydantic data structures are just instances of classes you define; autocompleting, linting,
`mypy <http://mypy-lang.org/>`_ your intuition should all work properly with your validated data.
49 |
**dual use**
pydantic's :ref:`BaseSettings <settings>` class allows it to be used in both a "validate this request data" context
and "load my system settings" context. The main difference being that system settings can have defaults changed
by environment variables and are otherwise only changed in unit tests.
54 |
55 | **fast**
In `benchmarks <https://github.com/samuelcolvin/pydantic/tree/master/benchmarks>`_ pydantic is around twice as
fast as `trafaret <https://github.com/Deepwalker/trafaret>`_. Other comparisons to cerberus, DRF, jsonmodels to come.
58 |
59 | **validate complex structures**
60 | use of recursive pydantic models and ``typing``'s ``List`` and ``Dict`` etc. allow complex data schemas to be
61 | clearly and easily defined.
62 |
63 | **extendible**
64 | pydantic allows custom data types to be defined or you can extend validation with the `clean_*` methods on a model.
65 |
66 |
67 | Install
68 | -------
69 |
70 | Just::
71 |
72 | pip install pydantic
73 |
74 | pydantic has no dependencies except python 3.6+. If you've got python 3.6 and ``pip`` installed - you're good to go.
75 |
76 | Usage
77 | -----
78 |
79 | Compound Types
80 | ..............
81 |
pydantic uses ``typing`` types to define more complex objects.
83 |
84 | .. literalinclude:: usage_compound.py
85 |
86 | (This script is complete, it should run "as is")
87 |
88 | Choices
89 | .......
90 |
pydantic uses python's standard ``enum`` classes to define value choices.
92 |
93 | .. literalinclude:: usage_choices.py
94 |
95 | (This script is complete, it should run "as is")
96 |
97 | Recursive Models
98 | ................
99 |
100 | More complex hierarchical data structures can be defined using models as types in annotations themselves.
101 |
102 | .. literalinclude:: usage_recursive.py
103 |
104 | (This script is complete, it should run "as is")
105 |
106 | Error Handling
107 | ..............
108 |
109 | .. literalinclude:: usage_errors.py
110 |
111 | (This script is complete, it should run "as is")
112 |
113 | Exotic Types
114 | ............
115 |
116 | pydantic comes with a number of utilities for parsing or validating common objects.
117 |
118 | .. literalinclude:: usage_exotic.py
119 |
120 | (This script is complete, it should run "as is")
121 |
122 |
123 | Model Config
124 | ............
125 |
126 | Behaviour of pydantic can be controlled via the ``Config`` class on a model.
127 |
Here the defaults for each config parameter are shown together with their meaning.
129 |
130 | .. literalinclude:: usage_config.py
131 |
132 | .. _settings:
133 |
134 | Settings
135 | ........
136 |
One of pydantic's most useful applications is to define default settings, allowing them to be overridden by
138 | environment variables or keyword arguments (eg. in unit tests).
139 |
140 | This usage example comes last as it uses numerous concepts described above.
141 |
142 | .. literalinclude:: usage_settings.py
143 |
144 | Here ``redis_port`` could be modified via ``export MY_PREFIX_REDIS_PORT=6380`` or ``auth_key`` by
145 | ``export my_api_key=6380``.
146 |
147 | .. include:: ../HISTORY.rst
148 |
149 | .. |pypi| image:: https://img.shields.io/pypi/v/pydantic.svg
150 | :target: https://pypi.python.org/pypi/pydantic
151 | .. |license| image:: https://img.shields.io/pypi/l/pydantic.svg
152 | :target: https://github.com/samuelcolvin/pydantic
153 |
--------------------------------------------------------------------------------
/tests/test_datetime_parse.py:
--------------------------------------------------------------------------------
1 | """
2 | Stolen from https://github.com/django/django/blob/master/tests/utils_tests/test_dateparse.py at
3 | 9718fa2e8abe430c3526a9278dd976443d4ae3c6
4 |
5 | Changed to:
6 | * use standard pytest layout
7 | * parametrize tests
8 | """
9 | from datetime import date, datetime, time, timedelta, timezone
10 |
11 | import pytest
12 |
13 | from pydantic.datetime_parse import parse_date, parse_datetime, parse_duration, parse_time
14 |
15 |
def create_tz(minutes):
    """Fixed-offset timezone at the given number of minutes from UTC."""
    offset = timedelta(minutes=minutes)
    return timezone(offset)
18 |
19 |
@pytest.mark.parametrize('value,result', [
    # Valid inputs
    ('1494012444.883309', date(2017, 5, 5)),
    (1494012444.883309, date(2017, 5, 5)),
    ('1494012444', date(2017, 5, 5)),
    (1494012444, date(2017, 5, 5)),
    ('2012-04-23', date(2012, 4, 23)),
    ('2012-4-9', date(2012, 4, 9)),
    # Invalid inputs
    ('x20120423', ValueError),
    ('2012-04-56', ValueError),
])
def test_date_parsing(value, result):
    """parse_date accepts unix timestamps (str or numeric) and ISO dates; bad input raises."""
    if result == ValueError:
        with pytest.raises(ValueError):
            parse_date(value)
    else:
        assert parse_date(value) == result
38 |
39 |
@pytest.mark.parametrize('value,result', [
    # Valid inputs
    ('09:15:00', time(9, 15)),
    ('10:10', time(10, 10)),
    ('10:20:30.400', time(10, 20, 30, 400000)),
    ('4:8:16', time(4, 8, 16)),
    # Invalid inputs
    ('091500', ValueError),
    ('09:15:90', ValueError),
])
def test_time_parsing(value, result):
    """parse_time accepts H:M[:S[.f]] with 1-2 digit components; bad input raises."""
    if result == ValueError:
        with pytest.raises(ValueError):
            parse_time(value)
    else:
        assert parse_time(value) == result
56 |
57 |
@pytest.mark.parametrize('value,result', [
    # Valid inputs
    # values in seconds
    ('1494012444.883309', datetime(2017, 5, 5, 19, 27, 24, 883309, tzinfo=timezone.utc)),
    (1494012444.883309, datetime(2017, 5, 5, 19, 27, 24, 883309, tzinfo=timezone.utc)),
    ('1494012444', datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),
    (1494012444, datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),
    # values in ms
    ('1494012444000.883309', datetime(2017, 5, 5, 19, 27, 24, 883, tzinfo=timezone.utc)),
    (1494012444000, datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),

    ('2012-04-23T09:15:00', datetime(2012, 4, 23, 9, 15)),
    ('2012-4-9 4:8:16', datetime(2012, 4, 9, 4, 8, 16)),
    ('2012-04-23T09:15:00Z', datetime(2012, 4, 23, 9, 15, 0, 0, timezone.utc)),
    ('2012-4-9 4:8:16-0320', datetime(2012, 4, 9, 4, 8, 16, 0, create_tz(-200))),
    ('2012-04-23T10:20:30.400+02:30', datetime(2012, 4, 23, 10, 20, 30, 400000, create_tz(150))),
    ('2012-04-23T10:20:30.400+02', datetime(2012, 4, 23, 10, 20, 30, 400000, create_tz(120))),
    ('2012-04-23T10:20:30.400-02', datetime(2012, 4, 23, 10, 20, 30, 400000, create_tz(-120))),
    # Invalid inputs
    ('x20120423091500', ValueError),
    ('2012-04-56T09:15:90', ValueError),
])
def test_datetime_parsing(value, result):
    """parse_datetime accepts unix stamps (seconds or ms), ISO strings, and Z/±HH[:MM] offsets."""
    if result == ValueError:
        with pytest.raises(ValueError):
            parse_datetime(value)
    else:
        assert parse_datetime(value) == result
86 |
87 |
@pytest.mark.parametrize('delta', [
    timedelta(days=4, minutes=15, seconds=30, milliseconds=100), # fractions of seconds
    timedelta(hours=10, minutes=15, seconds=30), # hours, minutes, seconds
    timedelta(days=4, minutes=15, seconds=30), # multiple days
    timedelta(days=1, minutes=00, seconds=00), # single day
    timedelta(days=-4, minutes=15, seconds=30), # negative durations
    timedelta(minutes=15, seconds=30), # minute & seconds
    timedelta(seconds=30), # seconds
])
def test_parse_python_format(delta):
    """Round-trip: str(timedelta) must parse back to the same timedelta."""
    assert parse_duration(format(delta)) == delta
99 |
100 |
@pytest.mark.parametrize('value,result', [
    # seconds
    ('30', timedelta(seconds=30)),
    (30, timedelta(seconds=30)),
    (30.1, timedelta(seconds=30, milliseconds=100)),
    # minutes seconds
    ('15:30', timedelta(minutes=15, seconds=30)),
    ('5:30', timedelta(minutes=5, seconds=30)),
    # hours minutes seconds
    ('10:15:30', timedelta(hours=10, minutes=15, seconds=30)),
    ('1:15:30', timedelta(hours=1, minutes=15, seconds=30)),
    ('100:200:300', timedelta(hours=100, minutes=200, seconds=300)),
    # days
    ('4 15:30', timedelta(days=4, minutes=15, seconds=30)),
    ('4 10:15:30', timedelta(days=4, hours=10, minutes=15, seconds=30)),
    # fractions of seconds
    ('15:30.1', timedelta(minutes=15, seconds=30, milliseconds=100)),
    ('15:30.01', timedelta(minutes=15, seconds=30, milliseconds=10)),
    ('15:30.001', timedelta(minutes=15, seconds=30, milliseconds=1)),
    ('15:30.0001', timedelta(minutes=15, seconds=30, microseconds=100)),
    ('15:30.00001', timedelta(minutes=15, seconds=30, microseconds=10)),
    ('15:30.000001', timedelta(minutes=15, seconds=30, microseconds=1)),
    # negative
    ('-4 15:30', timedelta(days=-4, minutes=15, seconds=30)),
    ('-172800', timedelta(days=-2)),
    ('-15:30', timedelta(minutes=-15, seconds=30)),
    ('-1:15:30', timedelta(hours=-1, minutes=15, seconds=30)),
    ('-30.1', timedelta(seconds=-30, milliseconds=-100)),
    # iso_8601 (only the sections timedelta itself supports: no years/months/weeks)
    ('P4Y', ValueError),
    ('P4M', ValueError),
    ('P4W', ValueError),
    ('P4D', timedelta(days=4)),
    ('P0.5D', timedelta(hours=12)),
    ('PT5H', timedelta(hours=5)),
    ('PT5M', timedelta(minutes=5)),
    ('PT5S', timedelta(seconds=5)),
    ('PT0.000005S', timedelta(microseconds=5)),
])
def test_parse_durations(value, result):
    """parse_duration accepts numbers, django-style strings, and a subset of ISO 8601."""
    if result == ValueError:
        with pytest.raises(ValueError):
            parse_duration(value)
    else:
        assert parse_duration(value) == result
146 |
--------------------------------------------------------------------------------
/pydantic/datetime_parse.py:
--------------------------------------------------------------------------------
1 | """
2 | Functions to parse datetime objects.
3 |
4 | We're using regular expressions rather than time.strptime because:
5 | - They provide both validation and parsing.
6 | - They're more flexible for datetimes.
7 | - The date/datetime/time constructors produce friendlier error messages.
8 |
9 | Stolen from https://raw.githubusercontent.com/django/django/master/django/utils/dateparse.py at
10 | 9718fa2e8abe430c3526a9278dd976443d4ae3c6
11 |
12 | Changed to:
13 | * use standard python datetime types not django.utils.timezone
14 | * raise ValueError when regex doesn't match rather than returning None
15 | * support parsing unix timestamps for dates and datetimes
16 | """
17 | import re
18 | from datetime import date, datetime, time, timedelta, timezone
19 | from typing import Union
20 |
# bug fix: the named-group names had been stripped from all of these patterns
# (leaving invalid '(?P...)' syntax); restored to match the groupdict() keys
# consumed by parse_date/parse_time/parse_datetime/parse_duration below.
date_re = re.compile(r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})$')

time_re = re.compile(
    r'(?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
    r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
)

datetime_re = re.compile(
    r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})'
    r'[T ](?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
    r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
    r'(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$'
)

standard_duration_re = re.compile(
    r'^'
    r'(?:(?P<days>-?\d+) (days?, )?)?'
    r'((?:(?P<hours>-?\d+):)(?=\d+:\d+))?'
    r'(?:(?P<minutes>-?\d+):)?'
    r'(?P<seconds>-?\d+)'
    r'(?:\.(?P<microseconds>\d{1,6})\d{0,6})?'
    r'$'
)

# Support the sections of ISO 8601 date representation that are accepted by timedelta
iso8601_duration_re = re.compile(
    r'^(?P<sign>[-+]?)'
    r'P'
    r'(?:(?P<days>\d+(.\d+)?)D)?'
    r'(?:T'
    r'(?:(?P<hours>\d+(.\d+)?)H)?'
    r'(?:(?P<minutes>\d+(.\d+)?)M)?'
    r'(?:(?P<seconds>\d+(.\d+)?)S)?'
    r')?'
    r'$'
)
57 |
EPOCH = datetime(1970, 1, 1)
MS_WATERSHED = int(1e11)  # if greater than this, the number is in ms (in seconds this is 3rd March 5138)
StrIntFloat = Union[str, int, float]


def get_numeric(value: StrIntFloat):
    """Return value as an int or float if it looks numeric, otherwise None."""
    if isinstance(value, (int, float)):
        return value
    for convert in (int, float):
        try:
            return convert(value)
        except ValueError:
            continue
    return None


def from_unix_seconds(seconds: int) -> datetime:
    """Convert a unix timestamp to an aware UTC datetime, auto-scaling ms (or finer) to seconds."""
    while seconds > MS_WATERSHED:
        seconds /= 1000
    return (EPOCH + timedelta(seconds=seconds)).replace(tzinfo=timezone.utc)
81 |
82 |
def parse_date(value: StrIntFloat) -> date:
    """
    Parse a string and return a datetime.date.

    Accepts unix timestamps (int/float or numeric string) as well as 'YYYY-MM-DD' strings.

    Raise ValueError if the input is well formatted but not a valid date.
    Raise ValueError if the input isn't well formatted.
    """
    number = get_numeric(value)
    if number is not None:
        # bug fix: was "if number:", which sent the valid timestamp 0 down the
        # string-format path (raising TypeError for non-string zeros)
        return from_unix_seconds(number).date()

    match = date_re.match(value)
    if not match:
        raise ValueError('Invalid date format')

    kw = {k: int(v) for k, v in match.groupdict().items()}
    return date(**kw)
100 |
101 |
def parse_time(value: StrIntFloat) -> time:
    """
    Parse a string and return a datetime.time.

    This function doesn't support time zone offsets.

    Raise ValueError if the input is well formatted but not a valid time.
    Raise ValueError if the input isn't well formatted, in particular if it contains an offset.
    """
    match = time_re.match(value)
    if match is None:
        raise ValueError('Invalid time format')

    groups = match.groupdict()
    micro = groups['microsecond']
    if micro:
        # pad fractional seconds out to whole microseconds, e.g. '45' -> '450000'
        groups['microsecond'] = micro.ljust(6, '0')
    return time(**{name: int(val) for name, val in groups.items() if val is not None})
120 |
121 |
def parse_datetime(value: StrIntFloat) -> datetime:
    """
    Parse a unix timestamp (int/float/numeric str) or an ISO-style datetime
    string and return a datetime.datetime.

    This function supports time zone offsets. When the input contains one,
    the output uses a timezone with a fixed offset from UTC.

    Raise ValueError if the input is well formatted but not a valid datetime.
    Raise ValueError if the input isn't well formatted.
    """
    number = get_numeric(value)
    if number is not None:
        # FIX: was `if number:`, which mis-handled the epoch timestamp:
        # parse_datetime(0) crashed on datetime_re.match(0) and
        # parse_datetime('0') raised "Invalid datetime format"
        return from_unix_seconds(number)

    match = datetime_re.match(value)
    if not match:
        raise ValueError('Invalid datetime format')

    kw = match.groupdict()
    if kw['microsecond']:
        # pad fractional seconds out to whole microseconds, e.g. '45' -> '450000'
        kw['microsecond'] = kw['microsecond'].ljust(6, '0')
    tzinfo = kw.pop('tzinfo')
    if tzinfo == 'Z':
        tzinfo = timezone.utc
    elif tzinfo is not None:
        # offsets look like '+HH', '+HHMM' or '+HH:MM'
        offset_mins = int(tzinfo[-2:]) if len(tzinfo) > 3 else 0
        offset = 60 * int(tzinfo[1:3]) + offset_mins
        if tzinfo[0] == '-':
            offset = -offset
        tzinfo = timezone(timedelta(minutes=offset))
    kw = {k: int(v) for k, v in kw.items() if v is not None}
    kw['tzinfo'] = tzinfo
    return datetime(**kw)
155 |
156 |
def parse_duration(value: StrIntFloat) -> timedelta:
    """
    Parse a duration string and return a datetime.timedelta.

    The preferred format for durations in Django is '%d %H:%M:%S.%f'.

    Also supports ISO 8601 representation.
    """
    if isinstance(value, (int, float)):
        # the regexes below require a string
        value = str(value)

    match = standard_duration_re.match(value) or iso8601_duration_re.match(value)
    if match is None:
        raise ValueError('Invalid duration format')

    groups = match.groupdict()
    sign = -1 if groups.pop('sign', '+') == '-' else 1
    micro = groups.get('microseconds')
    if micro:
        # pad the fraction out to whole microseconds...
        micro = micro.ljust(6, '0')
        if groups.get('seconds') and groups['seconds'].startswith('-'):
            # ...and keep its sign consistent with the seconds component
            micro = '-' + micro
        groups['microseconds'] = micro
    kwargs = {name: float(val) for name, val in groups.items() if val is not None}
    return sign * timedelta(**kwargs)
181 |
--------------------------------------------------------------------------------
/pydantic/main.py:
--------------------------------------------------------------------------------
1 | from collections import OrderedDict
2 | from types import FunctionType
3 |
4 | from .exceptions import Error, Extra, Missing, ValidationError
5 | from .fields import Field
6 | from .validators import dict_validator
7 |
8 |
class BaseConfig:
    # Default model configuration; a model overrides these via a nested `Config` class.
    min_anystr_length = 0  # presumably min length for str/bytes fields — enforced in validators, not visible here
    max_anystr_length = 2 ** 16  # presumably max length for str/bytes fields — enforced in validators, not visible here
    min_number_size = -2 ** 64  # presumably lower bound for numeric fields — enforced elsewhere
    max_number_size = 2 ** 64  # presumably upper bound for numeric fields — enforced elsewhere
    raise_exception = True  # raise ValidationError on invalid input instead of just collecting __errors__
    validate_all = False  # also run validators on defaults of omitted fields
    ignore_extra = True  # silently drop inputs that don't match any field
    allow_extra = False  # accept non-field inputs and set them as attributes (overrides ignore_extra)
    fields = None  # optional per-field config, e.g. {'a': {'alias': '_a'}} or shorthand {'a': '_a'}
19 |
20 |
def inherit_config(self_config, parent_config) -> BaseConfig:
    """
    Merge ``parent_config`` into ``self_config``: every public attribute the
    child doesn't define is copied from the parent. If ``self_config`` is
    falsy (e.g. no ``Config`` class was declared) the parent is returned.
    """
    if not self_config:
        return parent_config
    for attr, inherited in parent_config.__dict__.items():
        if attr.startswith('_') or hasattr(self_config, attr):
            continue
        setattr(self_config, attr, inherited)
    return self_config
28 |
29 |
30 | TYPE_BLACKLIST = FunctionType, property, type, classmethod, staticmethod
31 |
32 |
class MetaModel(type):
    """
    Metaclass for BaseModel: collects annotated/assigned class attributes into
    ``__fields__`` (as Field instances) and resolves the model's ``config``.
    """
    @classmethod
    def __prepare__(mcs, *args, **kwargs):
        # use an ordered namespace so fields keep their declaration order
        return OrderedDict()

    def __new__(mcs, name, bases, namespace):
        fields = OrderedDict()
        config = BaseConfig
        # inherit fields and config from model base classes
        for base in reversed(bases):
            if issubclass(base, BaseModel) and base != BaseModel:
                fields.update(base.__fields__)
                config = inherit_config(base.config, config)

        annotations = namespace.get('__annotations__')
        # a nested `Config` class on this model overrides inherited settings
        config = inherit_config(namespace.get('Config'), config)
        # plain functions named validate_* become class validators (see Field)
        class_validators = {n: f for n, f in namespace.items()
                            if n.startswith('validate_') and isinstance(f, FunctionType)}

        config_fields = config.fields or {}
        for var_name, value in namespace.items():
            # skip private attributes and methods/properties/nested classes etc.
            if var_name.startswith('_') or isinstance(value, TYPE_BLACKLIST):
                continue
            field_config = config_fields.get(var_name)
            if isinstance(field_config, str):
                # shorthand: fields = {'a': '_a'} means alias only
                field_config = {'alias': field_config}
            field = Field.infer(
                name=var_name,
                value=value,
                annotation=annotations and annotations.get(var_name),
                class_validators=class_validators,
                field_config=field_config,
            )
            fields[var_name] = field
        namespace.update(
            config=config,
            __fields__=fields,
        )
        return super().__new__(mcs, name, bases, namespace)
71 |
72 |
# shared sentinel and error instances used during validation
MISSING = Missing('field required')
MISSING_ERROR = Error(MISSING, None, None)
EXTRA_ERROR = Error(Extra('extra fields not permitted'), None, None)
76 |
77 |
class BaseModel(metaclass=MetaModel):
    """
    Base class for validated models: keyword arguments passed to the
    constructor are validated against ``__fields__`` and stored both as
    instance attributes and in ``__values__``.
    """
    # populated by the metaclass, defined here to help IDEs only
    __fields__ = {}

    def __init__(self, **values):
        self.__values__ = {}
        self.__errors__ = OrderedDict()
        self._process_values(values)

    def setattr(self, name, value):
        """
        alternative to setattr() which checks the field exists and updates __values__.

        This exists instead of overriding __setattr__ as that seems to cause a universal 10% slow down.
        """
        if not self.config.allow_extra and name not in self.__fields__:
            raise ValueError(f'"{self.__class__.__name__}" object has no field "{name}"')
        setattr(self, name, value)
        self.__values__[name] = value

    @property
    def values(self):
        # validated values as a plain dict; nested models/containers converted via __iter__
        return dict(self)

    @property
    def fields(self):
        return self.__fields__

    @property
    def errors(self):
        # errors collected during validation; only useful when
        # config.raise_exception is False (otherwise ValidationError is raised)
        return self.__errors__

    @classmethod
    def get_validators(cls):
        # lets a model be used as a field type on another model
        yield dict_validator
        yield cls.validate

    @classmethod
    def validate(cls, value):
        return cls(**value)

    def _process_values(self, values):
        # validate every declared field (looked up by alias), then handle
        # any inputs that didn't match a field
        for name, field in self.__fields__.items():
            value = values.get(field.alias, MISSING)
            self._process_value(name, field.alias, field, value)

        if not self.config.ignore_extra or self.config.allow_extra:
            extra = values.keys() - {f.alias for f in self.__fields__.values()}
            if extra:
                if self.config.allow_extra:
                    for field in extra:
                        value = values[field]
                        self.__values__[field] = value
                        setattr(self, field, value)
                else:
                    # config.ignore_extra is False
                    for field in sorted(extra):
                        self.__errors__[field] = EXTRA_ERROR

        if self.config.raise_exception and self.__errors__:
            raise ValidationError(self.__errors__)

    def _process_value(self, name, alias, field, value):
        # validate a single field value; errors are keyed by the field's alias
        if value is MISSING:
            if self.config.validate_all or field.validate_always:
                value = field.default
            else:
                if field.required:
                    self.__errors__[alias] = MISSING_ERROR
                else:
                    self.__values__[name] = field.default
                    # could skip this if the attributes equals field.default, would it be quicker?
                    setattr(self, name, field.default)
                return

        value, errors = field.validate(value, self)
        if errors:
            self.__errors__[alias] = errors
        self.__values__[name] = value
        setattr(self, name, value)

    @classmethod
    def _get_value(cls, v):
        # recursively convert nested models and containers to plain python objects
        if isinstance(v, BaseModel):
            return v.values
        elif isinstance(v, list):
            return [cls._get_value(v_) for v_ in v]
        elif isinstance(v, dict):
            return {k_: cls._get_value(v_) for k_, v_ in v.items()}
        elif isinstance(v, set):
            return {cls._get_value(v_) for v_ in v}
        elif isinstance(v, tuple):
            return tuple(cls._get_value(v_) for v_ in v)
        else:
            return v

    def __iter__(self):
        # so `dict(model)` works
        for k, v in self.__values__.items():
            yield k, self._get_value(v)

    def __eq__(self, other):
        # compares validated values; works against both models and plain dicts
        if isinstance(other, BaseModel):
            return self.values == other.values
        else:
            return self.values == other

    def __repr__(self):
        return f'<{self}>'

    def __str__(self):
        return '{} {}'.format(self.__class__.__name__, ' '.join('{}={!r}'.format(k, v)
                                                                for k, v in self.__values__.items()))
191 |
--------------------------------------------------------------------------------
/tests/test_main.py:
--------------------------------------------------------------------------------
1 | import json
2 | from typing import Any
3 |
4 | import pytest
5 |
6 | from pydantic import BaseModel, ConfigError, NoneBytes, NoneStr, ValidationError, pretty_errors
7 |
8 |
# shared fixture model: one required float, one defaulted int
class UltraSimpleModel(BaseModel):
    a: float = ...
    b: int = 10


def test_ultra_simple_success():
    m = UltraSimpleModel(a=10.2)
    assert m.a == 10.2
    assert m.b == 10


def test_ultra_simple_missing():
    with pytest.raises(ValidationError) as exc_info:
        UltraSimpleModel()
    assert """\
1 error validating input
a:
  field required (error_type=Missing)""" == str(exc_info.value)


def test_ultra_simple_failed():
    with pytest.raises(ValidationError) as exc_info:
        UltraSimpleModel(a='x', b='x')
    assert """\
2 errors validating input
a:
  could not convert string to float: 'x' (error_type=ValueError track=float)
b:
  invalid literal for int() with base 10: 'x' (error_type=ValueError track=int)\
""" == str(exc_info.value)


def test_ultra_simple_repr():
    m = UltraSimpleModel(a=10.2)
    # NOTE(review): the expected repr strings below look truncated in this copy
    # (angle-bracketed content appears to have been stripped) — restore from history
    assert repr(m) == ''
    assert repr(m.fields['a']) == ("")
    assert dict(m) == {'a': 10.2, 'b': 10}


def test_comparing():
    # __eq__ compares against both plain dicts and other models
    m = UltraSimpleModel(a=10.2, b='100')
    assert m == {'a': 10.2, 'b': 100}
    assert m == UltraSimpleModel(a=10.2, b=100)


class ConfigModel(UltraSimpleModel):
    class Config:
        raise_exception = False


def test_config_doesnt_raise():
    # with raise_exception off, errors are collected instead of raised
    m = ConfigModel()
    assert len(m.errors) == 1
    assert m.errors['a'].exc.args[0] == 'field required'
    assert m.config.raise_exception is False
    assert m.config.max_anystr_length == 65536
66 |
67 |
def test_nullable_strings_success():
    # NoneStr/NoneBytes fields accept explicit None; plain str/bytes don't
    class NoneCheckModel(BaseModel):
        existing_str_value = 'foo'
        required_str_value: str = ...
        required_str_none_value: NoneStr = ...
        existing_bytes_value = b'foo'
        required_bytes_value: bytes = ...
        required_bytes_none_value: NoneBytes = ...

    m = NoneCheckModel(
        required_str_value='v1',
        required_str_none_value=None,
        required_bytes_value='v2',
        required_bytes_none_value=None,
    )
    assert m.required_str_value == 'v1'
    assert m.required_str_none_value is None
    assert m.required_bytes_value == b'v2'
    assert m.required_bytes_none_value is None


def test_nullable_strings_fails():
    class NoneCheckModel(BaseModel):
        existing_str_value = 'foo'
        required_str_value: str = ...
        required_str_none_value: NoneStr = ...
        existing_bytes_value = b'foo'
        required_bytes_value: bytes = ...
        required_bytes_none_value: NoneBytes = ...

        class Config:
            raise_exception = False
    m = NoneCheckModel(
        required_str_value=None,
        required_str_none_value=None,
        required_bytes_value=None,
        required_bytes_none_value=None,
    )
    # "allow value" matches the library's actual (typo'd) message — don't "fix" here
    assert """\
{
  "required_bytes_value": {
    "error_msg": "None is not an allow value",
    "error_type": "TypeError",
    "index": null,
    "track": "bytes"
  },
  "required_str_value": {
    "error_msg": "None is not an allow value",
    "error_type": "TypeError",
    "index": null,
    "track": "str"
  }
}""" == json.dumps(pretty_errors(m.errors), indent=2, sort_keys=True)
121 |
122 |
# model with a nested model field
class RecursiveModel(BaseModel):
    grape: bool = ...
    banana: UltraSimpleModel = ...


def test_recursion():
    m = RecursiveModel(grape=1, banana={'a': 1})
    assert m.grape is True
    assert m.banana.a == 1.0
    assert m.banana.b == 10
    # NOTE(review): expected repr looks truncated in this copy — restore from history
    assert repr(m) == '>'


def test_recursion_fails():
    with pytest.raises(ValidationError):
        RecursiveModel(grape=1, banana=123)


class PreventExtraModel(BaseModel):
    foo = 'whatever'

    class Config:
        ignore_extra = False


def test_prevent_extra_success():
    m = PreventExtraModel()
    assert m.foo == 'whatever'
    m = PreventExtraModel(foo=1)
    assert m.foo == '1'


def test_prevent_extra_fails():
    # with ignore_extra off, unknown inputs become Extra errors
    with pytest.raises(ValidationError) as exc_info:
        PreventExtraModel(foo='ok', bar='wrong', spam='xx')
    assert exc_info.value.message == '2 errors validating input'
    assert """\
bar:
  extra fields not permitted (error_type=Extra)
spam:
  extra fields not permitted (error_type=Extra)""" == exc_info.value.display_errors
164 |
165 |
# custom type whose validator has an unsupported signature
class InvalidValidator:
    @classmethod
    def get_validators(cls):
        yield cls.has_wrong_arguments

    @classmethod
    def has_wrong_arguments(cls, value, bar):
        pass


def test_invalid_validator():
    with pytest.raises(ConfigError) as exc_info:
        class InvalidValidatorModel(BaseModel):
            x: InvalidValidator = ...
    assert exc_info.value.args[0].startswith('Invalid signature for validator')


def test_no_validator():
    with pytest.raises(ConfigError) as exc_info:
        class NoValidatorModel(BaseModel):
            x: object = ...
    # NOTE(review): the expected message looks truncated in this copy (probably
    # ended with the repr of the object type) — confirm against history
    assert exc_info.value.args[0] == "no validator found for "


def test_unable_to_infer():
    with pytest.raises(ConfigError) as exc_info:
        class InvalidDefinitionModel(BaseModel):
            x = None
    assert exc_info.value.args[0] == 'unable to infer type for attribute "x"'
195 |
196 |
def test_not_required():
    # a non-Ellipsis default makes the field optional and nullable
    class Model(BaseModel):
        a: float = None
    assert Model(a=12.2).a == 12.2
    assert Model().a is None
    assert Model(a=None).a is None


def test_infer_type():
    # types are inferred from default values when no annotation is given
    class Model(BaseModel):
        a = False
        b = ''
        c = 0
    assert Model().a is False
    assert Model().b == ''
    assert Model().c == 0


def test_allow_extra():
    class Model(BaseModel):
        a: float = ...

        class Config:
            allow_extra = True

    assert Model(a='10.2', b=12).values == {'a': 10.2, 'b': 12}


def test_set_attr():
    m = UltraSimpleModel(a=10.2)
    assert m.values == {'a': 10.2, 'b': 10}
    m.setattr('b', 20)
    assert m.values == {'a': 10.2, 'b': 20}


def test_set_attr_invalid():
    m = UltraSimpleModel(a=10.2)
    assert m.values == {'a': 10.2, 'b': 10}
    with pytest.raises(ValueError) as exc_info:
        m.setattr('c', 20)
    assert '"UltraSimpleModel" object has no field "c"' in str(exc_info)


def test_any():
    class AnyModel(BaseModel):
        a: Any = 10

    assert AnyModel().a == 10
    assert AnyModel(a='foobar').a == 'foobar'


def test_alias():
    # config.fields lets the input key ('_a') differ from the attribute name ('a')
    class Model(BaseModel):
        a = 'foobar'

        class Config:
            fields = {
                'a': {'alias': '_a'}
            }

    assert Model().a == 'foobar'
    assert Model().values == {'a': 'foobar'}
    assert Model(_a='different').a == 'different'
    assert Model(_a='different').values == {'a': 'different'}
261 |
--------------------------------------------------------------------------------
/pydantic/fields.py:
--------------------------------------------------------------------------------
1 | import inspect
2 | from collections import OrderedDict
3 | from enum import IntEnum
4 | from typing import Any, List, Mapping, Set, Type, Union
5 |
6 | from .exceptions import ConfigError, Error, type_display
7 | from .validators import NoneType, find_validators, not_none_validator
8 |
9 |
class ValidatorSignature(IntEnum):
    # how a validator expects to be called (classified by _get_validator_signature)
    JUST_VALUE = 1  # validator(v)
    VALUE_KWARGS = 2  # validator(v, model=..., field=...)
    BOUND_METHOD = 3  # validator(model, v)
14 |
15 |
class Shape(IntEnum):
    # the container "shape" of a field's value
    SINGLETON = 1
    LIST = 2
    SET = 3
    MAPPING = 4
21 |
22 |
class Field:
    """
    Describes a single model field: its type, default, validators and
    container "shape" (singleton, list, set or mapping). Union and nested
    typing types are expanded into ``sub_fields``.
    """
    __slots__ = ('type_', 'key_type_', 'sub_fields', 'key_field', 'validators', 'default', 'required',
                 'name', 'alias', 'description', 'info', 'validate_always', 'allow_none', 'shape', 'multipart')

    def __init__(
            self, *,
            name: str,
            type_: Type,
            alias: str=None,
            class_validators: dict=None,
            default: Any=None,
            required: bool=False,
            allow_none: bool=False,
            description: str=None):

        self.name: str = name
        self.alias: str = alias or name
        self.type_: type = type_
        self.key_type_: type = None
        # a type may opt in to being validated even when the field is omitted
        self.validate_always: bool = getattr(self.type_, 'validate_always', False)
        self.sub_fields = None
        self.key_field: Field = None
        self.validators = []
        self.default: Any = default
        self.required: bool = required
        self.description: str = description
        self.allow_none: bool = allow_none
        self.shape: Shape = Shape.SINGLETON
        self.multipart = False
        self.info = {}
        self._prepare(class_validators or {})

    @classmethod
    def infer(cls, *, name, value, annotation, class_validators, field_config):
        """Build a Field from a class attribute: ``... `` marks it required."""
        required = value == Ellipsis
        return cls(
            name=name,
            type_=annotation,
            alias=field_config and field_config.get('alias'),
            class_validators=class_validators,
            default=None if required else value,
            required=required,
            description=field_config and field_config.get('description'),
        )

    @property
    def alt_alias(self):
        # True when the field's input key differs from its attribute name
        return self.name != self.alias

    def _prepare(self, class_validators):
        # infer the type from the default when no annotation was given
        if self.default is not None and self.type_ is None:
            self.type_ = type(self.default)

        if self.type_ is None:
            raise ConfigError(f'unable to infer type for attribute "{self.name}"')

        # an optional field whose default is None implicitly accepts None
        if not self.required and not self.validate_always and self.default is None:
            self.allow_none = True

        self._populate_sub_fields(class_validators)
        if self.sub_fields is None:
            self._populate_validators(class_validators)

        # summary used by __str__/__repr__ and surfaced in tests
        self.info = OrderedDict([
            ('type', type_display(self.type_)),
            ('default', self.default),
            ('required', self.required)
        ])
        if self.required:
            self.info.pop('default')
        if self.multipart:
            self.info['sub_fields'] = self.sub_fields
        else:
            self.info['validators'] = [v[1].__qualname__ for v in self.validators]

        # TODO
        # if self.description:
        #     self.info['description'] = self.description

    def _populate_sub_fields(self, class_validators):
        # typing interface is horrible, we have to do some ugly checks
        origin = getattr(self.type_, '__origin__', None)
        if origin is None:
            # field is not "typing" object eg. Union, Dict, List etc.
            return

        if origin is Union:
            # one sub-field per union member; NoneType just sets allow_none
            types_ = []
            for type_ in self.type_.__args__:
                if type_ is NoneType:
                    self.allow_none = True
                else:
                    types_.append(type_)
            self.sub_fields = [Field(
                type_=t,
                class_validators=class_validators,
                default=self.default,
                required=self.required,
                allow_none=self.allow_none,
                name=f'{self.name}_{type_display(t)}'
            ) for t in types_]
            self.multipart = True
        elif issubclass(origin, List):
            self.type_ = self.type_.__args__[0]
            self.shape = Shape.LIST
        elif issubclass(origin, Set):
            self.type_ = self.type_.__args__[0]
            self.shape = Shape.SET
        else:
            assert issubclass(origin, Mapping)
            # mappings get a separate Field for validating keys
            self.key_type_ = self.type_.__args__[0]
            self.type_ = self.type_.__args__[1]
            self.shape = Shape.MAPPING
            self.key_field = Field(
                type_=self.key_type_,
                class_validators=class_validators,
                default=self.default,
                required=self.required,
                allow_none=self.allow_none,
                name=f'key_{self.name}'
            )

        # element type is itself a typing construct (e.g. List[Union[...]]): recurse
        if self.sub_fields is None and getattr(self.type_, '__origin__', False):
            self.multipart = True
            self.sub_fields = [Field(
                type_=self.type_,
                class_validators=class_validators,
                default=self.default,
                required=self.required,
                allow_none=self.allow_none,
                name=f'_{self.name}'
            )]

    def _populate_validators(self, class_validators):
        # validator chain: validate_<name>_pre, then the type's own validators
        # (or the registry's), then validate_<name> and validate_<name>_post
        get_validators = getattr(self.type_, 'get_validators', None)
        v_funcs = (
            class_validators.get(f'validate_{self.name}_pre'),

            *(get_validators() if get_validators else find_validators(self.type_)),

            class_validators.get(f'validate_{self.name}'),
            class_validators.get(f'validate_{self.name}_post'),
        )
        for f in v_funcs:
            if not f or (self.allow_none and f is not_none_validator):
                continue
            self.validators.append((
                _get_validator_signature(f),
                f,
            ))

    def validate(self, v, model, index=None):
        """Validate ``v``; returns a ``(value, errors)`` pair (errors None on success)."""
        if self.allow_none and v is None:
            return None, None

        if self.shape is Shape.SINGLETON:
            return self._validate_singleton(v, model, index)
        elif self.shape is Shape.MAPPING:
            return self._validate_mapping(v, model)
        else:
            # list or set
            result, errors = self._validate_sequence(v, model)
            if not errors and self.shape is Shape.SET:
                return set(result), errors
            return result, errors

    def _validate_sequence(self, v, model):
        # validate each element; on any error return the original input + errors
        result, errors = [], []
        try:
            v_iter = enumerate(v)
        except TypeError as exc:
            return v, Error(exc, None, None)
        for i, v_ in v_iter:
            single_result, single_errors = self._validate_singleton(v_, model, i)
            if single_errors:
                errors.append(single_errors)
            else:
                result.append(single_result)
        if errors:
            return v, errors
        else:
            return result, None

    def _validate_mapping(self, v, model):
        # coerce to dict if necessary, then validate keys and values separately
        if isinstance(v, dict):
            v_iter = v
        else:
            try:
                v_iter = dict(v)
            except TypeError as exc:
                return v, Error(exc, None, None)

        result, errors = {}, []
        for k, v_ in v_iter.items():
            key_result, key_errors = self.key_field.validate(k, model, 'key')
            if key_errors:
                errors.append(key_errors)
                continue
            value_result, value_errors = self._validate_singleton(v_, model, k)
            if value_errors:
                errors.append(value_errors)
                continue
            result[key_result] = value_result
        if errors:
            return v, errors
        else:
            return result, None

    def _validate_singleton(self, v, model, index):
        if self.multipart:
            # try each union sub-field in order; first success wins
            errors = []
            for field in self.sub_fields:
                value, error = field.validate(v, model, index)
                if error:
                    errors.append(error)
                else:
                    return value, None
            return v, errors[0] if len(self.sub_fields) == 1 else errors
        else:
            # run the validator chain, threading the value through each
            for signature, validator in self.validators:
                try:
                    if signature is ValidatorSignature.JUST_VALUE:
                        v = validator(v)
                    elif signature is ValidatorSignature.VALUE_KWARGS:
                        v = validator(v, model=model, field=self)
                    else:
                        v = validator(model, v)
                except (ValueError, TypeError) as exc:
                    return v, Error(exc, self.type_, index)
            return v, None

    def __repr__(self):
        # NOTE(review): this repr looks truncated in this copy — presumably
        # f'<Field {self}>' originally; confirm against history
        return f''

    def __str__(self):
        return f'{self.name}: ' + ', '.join(f'{k}={v!r}' for k, v in self.info.items())
259 |
260 |
def _get_validator_signature(validator):
    """
    Work out how *validator* should be invoked — value only, value plus
    model/field kwargs, or as a bound method — based on its signature.

    Raises ConfigError when the signature matches none of the supported forms.
    """
    try:
        signature = inspect.signature(validator)
    except ValueError:
        # builtins such as float have no introspectable signature
        return ValidatorSignature.JUST_VALUE

    # binding now means a broken signature surfaces as ConfigError before any
    # validation runs, and can't be mistaken for a TypeError raised *by* the
    # validator while validating
    try:
        params = list(signature.parameters)
        if params[0] == 'self':
            signature.bind(object(), 1)
            return ValidatorSignature.BOUND_METHOD
        if len(params) == 1:
            signature.bind(1)
            return ValidatorSignature.JUST_VALUE
        signature.bind(1, model=2, field=3)
        return ValidatorSignature.VALUE_KWARGS
    except TypeError as e:
        raise ConfigError(f'Invalid signature for validator {validator}: {signature}, should be: '
                          f'(value) or (value, *, model, field)') from e
284 |
--------------------------------------------------------------------------------
/tests/test_complex.py:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 | from typing import Any, Dict, List, Set, Union
3 |
4 | import pytest
5 |
6 | from pydantic import BaseModel, NoneStrBytes, StrBytes, ValidationError
7 |
8 |
def test_str_bytes():
    class StrBytesModel(BaseModel):
        v: StrBytes = ...

    m = StrBytesModel(v='s')
    assert m.v == 's'
    # NOTE(review): the expected repr below looks truncated in this copy
    # (angle-bracketed content stripped) — restore from history
    assert (", "
"]>") == repr(m.fields['v'])

    m = StrBytesModel(v=b'b')
    assert m.v == 'b'

    with pytest.raises(ValidationError) as exc_info:
        StrBytesModel(v=None)
    assert exc_info.value.message == '1 error validating input'
    assert """\
{
  "v": [
    {
      "error_msg": "None is not an allow value",
      "error_type": "TypeError",
      "index": null,
      "track": "str"
    },
    {
      "error_msg": "None is not an allow value",
      "error_type": "TypeError",
      "index": null,
      "track": "bytes"
    }
  ]
}""" == exc_info.value.json(2)


def test_str_bytes_none():
    class StrBytesModel(BaseModel):
        v: NoneStrBytes = ...

    m = StrBytesModel(v='s')
    assert m.v == 's'

    m = StrBytesModel(v=b'b')
    assert m.v == 'b'

    m = StrBytesModel(v=None)
    assert m.v is None

    # NOTE not_none_validator removed
    # NOTE(review): the expected string below looks partially truncated in this
    # copy (sub_fields reprs stripped) — restore from history
    assert ("OrderedDict(["
"('type', 'typing.Union[str, bytes, NoneType]'), "
"('required', True), "
"('sub_fields', ["
", "
""
"])])") == repr(m.fields['v'].info)


def test_union_int_str():
    class Model(BaseModel):
        v: Union[int, str] = ...

    m = Model(v=123)
    assert m.v == 123

    m = Model(v='123')
    assert m.v == 123

    m = Model(v=b'foobar')
    assert m.v == 'foobar'

    # here both validators work and it's impossible to work out which value "closer"
    m = Model(v=12.2)
    assert m.v == 12

    with pytest.raises(ValidationError) as exc_info:
        Model(v=None)
    assert exc_info.value.message == '1 error validating input'
    assert """\
{
  "v": [
    {
      "error_msg": "int() argument must be a string, a bytes-like object or a number, not 'NoneType'",
      "error_type": "TypeError",
      "index": null,
      "track": "int"
    },
    {
      "error_msg": "None is not an allow value",
      "error_type": "TypeError",
      "index": null,
      "track": "str"
    }
  ]
}""" == exc_info.value.json(2)
109 |
110 |
def test_union_priority():
    # union members are tried in declaration order — first success wins
    class ModelOne(BaseModel):
        v: Union[int, str] = ...

    class ModelTwo(BaseModel):
        v: Union[str, int] = ...

    assert ModelOne(v='123').v == 123
    assert ModelTwo(v='123').v == '123'


def test_typed_list():
    class Model(BaseModel):
        v: List[int] = ...

    m = Model(v=[1, 2, '3'])
    assert m.v == [1, 2, 3]

    with pytest.raises(ValidationError) as exc_info:
        Model(v=[1, 'x', 'y'])
    assert exc_info.value.message == '1 error validating input'
    assert """\
{
  "v": [
    {
      "error_msg": "invalid literal for int() with base 10: 'x'",
      "error_type": "ValueError",
      "index": 1,
      "track": "int"
    },
    {
      "error_msg": "invalid literal for int() with base 10: 'y'",
      "error_type": "ValueError",
      "index": 2,
      "track": "int"
    }
  ]
}""" == exc_info.value.json(2)

    with pytest.raises(ValidationError) as exc_info:
        Model(v=1)
    assert exc_info.value.message == '1 error validating input'
    assert """\
{
  "v": {
    "error_msg": "'int' object is not iterable",
    "error_type": "TypeError",
    "index": null,
    "track": null
  }
}""" == exc_info.value.json(2)


def test_typed_set():
    class Model(BaseModel):
        v: Set[int] = ...

    assert Model(v={1, 2, '3'}).v == {1, 2, 3}
    assert Model(v=[1, 2, '3']).v == {1, 2, 3}
    with pytest.raises(ValidationError) as exc_info:
        Model(v=[1, 'x'])
    assert """\
1 error validating input
v:
  invalid literal for int() with base 10: 'x' (error_type=ValueError track=int index=1)""" == str(exc_info.value)


class DictModel(BaseModel):
    v: Dict[str, int] = ...


def test_dict_values():
    assert DictModel(v={'foo': 1}).values == {'v': {'foo': 1}}


@pytest.mark.parametrize('value,result', [
    ({'a': 2, 'b': 4}, {'a': 2, 'b': 4}),
    ({1: '2', 'b': 4}, {'1': 2, 'b': 4}),
    ([('a', 2), ('b', 4)], {'a': 2, 'b': 4}),
])
def test_typed_dict(value, result):
    # keys and values are both coerced; key/value pairs also accepted
    assert DictModel(v=value).v == result
193 |
194 |
@pytest.mark.parametrize('value,error', [
    (
        1,
        """\
1 error validating input
v:
  'int' object is not iterable (error_type=TypeError)"""
    ),
    (
        {'a': 'b'},
        """\
1 error validating input
v:
  invalid literal for int() with base 10: 'b' (error_type=ValueError track=int index=a)"""
    ),
    (
        [1, 2, 3],
        """\
1 error validating input
v:
  cannot convert dictionary update sequence element #0 to a sequence (error_type=TypeError)""",
    )
])
def test_typed_dict_error(value, error):
    with pytest.raises(ValidationError) as exc_info:
        DictModel(v=value)
    assert error == str(exc_info.value)


def test_dict_key_error():
    # invalid keys are reported with index=key
    class DictIntModel(BaseModel):
        v: Dict[int, int] = ...
    assert DictIntModel(v={1: 2, '3': '4'}).v == {1: 2, 3: 4}
    with pytest.raises(ValidationError) as exc_info:
        DictIntModel(v={'foo': 2, '3': '4'})
    assert """\
1 error validating input
v:
  invalid literal for int() with base 10: 'foo' (error_type=ValueError track=int index=key)""" == str(exc_info.value)


def test_all_model_validator():
    # validate_a_pre runs before type coercion, validate_a/validate_a_post after
    class OverModel(BaseModel):
        a: int = ...

        def validate_a_pre(self, v):
            return f'{v}1'

        def validate_a(self, v):
            assert isinstance(v, int)
            return f'{v}_main'

        def validate_a_post(self, v):
            assert isinstance(v, str)
            return f'{v}_post'

    m = OverModel(a=1)
    assert m.a == '11_main_post'
253 |
254 |
class SubModel(BaseModel):
    """Child model used by MasterListModel in the recursive-list tests."""
    name: str = ...
    count: int = None
258 |
259 |
class MasterListModel(BaseModel):
    """Model holding a list of SubModel children, defaulting to empty."""
    v: List[SubModel] = []
262 |
263 |
def test_recursive_list():
    m = MasterListModel(v=[])
    assert m.v == []

    # dicts in the list are parsed into SubModel instances
    m = MasterListModel(v=[{'name': 'testing', 'count': 4}])
    # NOTE(review): this expected repr looks truncated (angle-bracketed text
    # appears stripped); "]>" alone is unlikely to be the full repr - confirm
    assert "]>" == repr(m)
    assert m.v[0].name == 'testing'
    assert m.v[0].count == 4
    # .values serialises nested models back to plain dicts
    assert m.values == {'v': [{'count': 4, 'name': 'testing'}]}

    # a bare string cannot be coerced into a SubModel
    with pytest.raises(ValidationError) as exc_info:
        MasterListModel(v=['x'])
    print(exc_info.value.json())
    assert 'dictionary update sequence element #0 has length 1; 2 is required' in str(exc_info.value)
278 |
279 |
def test_recursive_list_error():
    # a missing required field on the nested SubModel is reported both in the
    # flat string form and in the structured json() form (with error_details)
    with pytest.raises(ValidationError) as exc_info:
        MasterListModel(v=[{}])

    assert """\
1 error validating input
v:
  1 error validating input (error_type=ValidationError track=SubModel)
  name:
    field required (error_type=Missing)\
""" == str(exc_info.value)
    assert """\
{
  "v": [
    {
      "error_details": {
        "name": {
          "error_msg": "field required",
          "error_type": "Missing",
          "index": null,
          "track": null
        }
      },
      "error_msg": "1 error validating input",
      "error_type": "ValidationError",
      "index": 0,
      "track": "SubModel"
    }
  ]
}""" == exc_info.value.json(2)
310 |
311 |
def test_list_unions():
    # Union members are tried in declared order: int first, then str
    class Model(BaseModel):
        v: List[Union[int, str]] = ...

    assert Model(v=[123, '456', 'foobar']).v == [123, 456, 'foobar']

    # None fails every union member, so both attempts appear in the report
    with pytest.raises(ValidationError) as exc_info:
        Model(v=[1, 2, None])
    assert """\
1 error validating input
v:
  int() argument must be a string, a bytes-like object or a number, not 'NoneType' \
(error_type=TypeError track=int index=2)
  None is not an allow value (error_type=TypeError track=str index=2)\
""" == str(exc_info.value)
327 |
328 |
def test_recursive_lists():
    class Model(BaseModel):
        v: List[List[Union[int, float]]] = ...

    assert Model(v=[[1, 2], [3, '4', '4.1']]).v == [[1, 2], [3, 4, 4.1]]
    # nested sub-fields get auto-generated names: one leading underscore per
    # nesting level, and union members past the first get a type suffix
    assert Model.__fields__['v'].sub_fields[0].name == '_v'
    assert len(Model.__fields__['v'].sub_fields) == 1
    assert Model.__fields__['v'].sub_fields[0].sub_fields[0].name == '__v'
    assert len(Model.__fields__['v'].sub_fields[0].sub_fields) == 1
    assert Model.__fields__['v'].sub_fields[0].sub_fields[0].sub_fields[1].name == '__v_float'
    assert len(Model.__fields__['v'].sub_fields[0].sub_fields[0].sub_fields) == 2
340 |
341 |
def test_str_enum():
    # an Enum that also subclasses str validates by member value
    class StrEnum(str, Enum):
        a = 'a10'
        b = 'b10'

    class Model(BaseModel):
        v: StrEnum = ...

    # the raw value is converted to the enum member itself
    assert Model(v='a10').v is StrEnum.a

    with pytest.raises(ValidationError):
        Model(v='different')
354 |
355 |
def test_any_dict():
    """Dict[int, Any] keeps values of any type untouched."""
    class Model(BaseModel):
        v: Dict[int, Any] = ...

    # str, int and list values all pass through unchanged
    for payload in ({1: 'foobar'}, {123: 456}, {2: [1, 2, 3]}):
        assert Model(v=payload).values == {'v': payload}
362 |
363 |
def test_infer_alias():
    """Config.fields maps the constructor kwarg '_a' onto attribute 'a'."""
    class Model(BaseModel):
        a = 'foobar'

        class Config:
            fields = {'a': '_a'}

    model = Model(_a='different')
    assert model.a == 'different'
372 |
373 |
def test_alias_error():
    # validation errors are reported under the alias, not the attribute name
    class Model(BaseModel):
        a = 123

        class Config:
            fields = {'a': '_a'}

    assert Model(_a='123').a == 123
    with pytest.raises(ValidationError) as exc_info:
        Model(_a='foo')
    assert """\
1 error validating input
_a:
  invalid literal for int() with base 10: 'foo' (error_type=ValueError track=int)""" == str(exc_info.value)
388 |
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | # pydantic documentation build configuration file, created by
4 | # sphinx-quickstart on Sat Aug 13 12:25:33 2016.
5 | #
6 | # This file is execfile()d with the current directory set to its
7 | # containing dir.
8 | #
9 | # Note that not all possible configuration values are present in this
10 | # autogenerated file.
11 | #
12 | # All configuration values have a default; values that are commented out
13 | # serve to show the default.
14 |
15 | # If extensions (or modules to document with autodoc) are in another directory,
16 | # add these directories to sys.path here. If the directory is relative to the
17 | # documentation root, use os.path.abspath to make it absolute, like shown here.
18 | #
19 | import os
20 | import sys
21 | sys.path.append(os.path.abspath('../pydantic'))
22 |
23 | # -- General configuration ------------------------------------------------
24 |
25 | # If your documentation needs a minimal Sphinx version, state it here.
26 | #
27 | # needs_sphinx = '1.0'
28 |
29 | # Add any Sphinx extension module names here, as strings. They can be
30 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
31 | # ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
]

# concatenate the class docstring and __init__ docstring in autodoc output
autoclass_content = 'both'
# list documented members in source order, not alphabetically
autodoc_member_order = 'bysource'

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
53 |
54 | # The encoding of source files.
55 | #
56 | # source_encoding = 'utf-8-sig'
57 |
# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'pydantic'
copyright = '2017, Samuel Colvin'
author = 'Samuel Colvin'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.

# NOTE(review): imported mid-file, after the sys.path tweak near the top of
# this file - confirm the import path is set up before this line runs
from pydantic.version import VERSION
# The short X.Y version. Could change this if you're updating docs for a previous version.
version = str(VERSION)
# The full version, including alpha/beta/rc tags.
release = str(VERSION)

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
82 |
83 | # There are two options for replacing |today|: either, you set today to some
84 | # non-false value, then it is used:
85 | #
86 | # today = ''
87 | #
88 | # Else, today_fmt is used as the format for a strftime call.
89 | #
90 | # today_fmt = '%B %d, %Y'
91 |
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
96 |
97 | # The reST default role (used for this markup: `text`) to use for all
98 | # documents.
99 | #
100 | # default_role = None
101 |
102 | # If true, '()' will be appended to :func: etc. cross-reference text.
103 | #
104 | # add_function_parentheses = True
105 |
106 | # If true, the current module name will be prepended to all description
107 | # unit titles (such as .. function::).
108 | #
109 | # add_module_names = True
110 |
111 | # If true, sectionauthor and moduleauthor directives will be shown in the
112 | # output. They are ignored by default.
113 | #
114 | # show_authors = False
115 |
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
# Enabled here because sphinx.ext.todo is in `extensions` above.
todo_include_todos = True
127 |
128 |
129 | # -- Options for HTML output ----------------------------------------------
130 |
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
    # GitHub banner/buttons all point at samuelcolvin/pydantic
    'github_user': 'samuelcolvin',
    'github_repo': 'pydantic',
    'travis_button': True,
    'codecov_button': True,
    'page_width': '1200px',
    'github_banner': True,
    'github_type': 'star',
}
149 |
150 | # Add any paths that contain custom themes here, relative to this directory.
151 | # html_theme_path = []
152 |
153 | # The name for this set of Sphinx documents.
154 | # " v documentation" by default.
155 | #
156 | # html_title = 'pydantic v5'
157 |
158 | # A shorter title for the navigation bar. Default is the same as html_title.
159 | #
160 | # html_short_title = None
161 |
162 | # The name of an image file (relative to this directory) to place at the top
163 | # of the sidebar.
164 | #
165 | # html_logo = None
166 |
167 | # The name of an image file (relative to this directory) to use as a favicon of
168 | # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
169 | # pixels large.
170 | #
171 | # html_favicon = None
172 |
173 | # Add any paths that contain custom static files (such as style sheets) here,
174 | # relative to this directory. They are copied after the builtin static files,
175 | # so a file named "default.css" will overwrite the builtin "default.css".
176 | # html_static_path = ['_static']
177 |
178 | # Add any extra paths that contain custom files (such as robots.txt or
179 | # .htaccess) here, relative to this directory. These files are copied
180 | # directly to the root of the documentation.
181 | #
182 | # html_extra_path = []
183 |
184 | # If not None, a 'Last updated on:' timestamp is inserted at every page
185 | # bottom, using the given strftime format.
186 | # The empty string is equivalent to '%b %d, %Y'.
187 | #
188 | # html_last_updated_fmt = None
189 |
190 | # If true, SmartyPants will be used to convert quotes and dashes to
191 | # typographically correct entities.
192 | #
193 | # html_use_smartypants = True
194 |
# Custom sidebar templates, maps document names to template names.
#
html_sidebars = {
    # '**' applies these sidebars to every page
    '**': [
        'about.html',
        'localtoc.html',
        # 'searchbox.html',
    ]
}
204 |
205 | # Additional templates that should be rendered to pages, maps page names to
206 | # template names.
207 | #
208 | # html_additional_pages = {}
209 |
210 | # If false, no module index is generated.
211 | #
212 | # html_domain_indices = True
213 |
214 | # If false, no index is generated.
215 | #
216 | # html_use_index = True
217 |
218 | # If true, the index is split into individual pages for each letter.
219 | #
220 | # html_split_index = False
221 |
222 | # If true, links to the reST sources are added to the pages.
223 | #
224 | # html_show_sourcelink = True
225 |
226 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
227 | #
228 | # html_show_sphinx = True
229 |
230 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
231 | #
232 | # html_show_copyright = True
233 |
234 | # If true, an OpenSearch description file will be output, and all pages will
235 | # contain a tag referring to it. The value of this option must be the
236 | # base URL from which the finished HTML is served.
237 | #
238 | # html_use_opensearch = ''
239 |
240 | # This is the file name suffix for HTML files (e.g. ".xhtml").
241 | # html_file_suffix = None
242 |
243 | # Language to be used for generating the HTML full-text search index.
244 | # Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
247 | #
248 | # html_search_language = 'en'
249 |
250 | # A dictionary with options for the search language support, empty by default.
251 | # 'ja' uses this config value.
252 | # 'zh' user can custom change `jieba` dictionary path.
253 | #
254 | # html_search_options = {'type': 'default'}
255 |
256 | # The name of a javascript file (relative to the configuration directory) that
257 | # implements a search results scorer. If empty, the default will be used.
258 | #
259 | # html_search_scorer = 'scorer.js'
260 |
# Output file base name for HTML help builder.
htmlhelp_basename = 'pydanticdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'pydantic.tex', 'pydantic Documentation', 'Samuel Colvin', 'manual'),
]
290 |
291 | # The name of an image file (relative to this directory) to place at the top of
292 | # the title page.
293 | #
294 | # latex_logo = None
295 |
296 | # For "manual" documents, if this is true, then toplevel headings are parts,
297 | # not chapters.
298 | #
299 | # latex_use_parts = False
300 |
301 | # If true, show page references after internal links.
302 | #
303 | # latex_show_pagerefs = False
304 |
305 | # If true, show URL addresses after external links.
306 | #
307 | # latex_show_urls = False
308 |
309 | # Documents to append as an appendix to all manuals.
310 | #
311 | # latex_appendices = []
312 |
# If false, will not define \strong, \code, \titleref, \crossref ... but only
314 | # \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
315 | # packages.
316 | #
317 | # latex_keep_old_macro_names = True
318 |
319 | # If false, no module index is generated.
320 | #
321 | # latex_domain_indices = True
322 |
323 |
324 | # -- Options for manual page output ---------------------------------------
325 |
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'pydantic', 'pydantic Documentation', [author], 1)
]

# If true, show URL addresses after external links.
#
# man_show_urls = False


# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# NOTE(review): Texinfo entries conventionally carry 7 elements (adding a
# one-line description and a category) - confirm this 5-element tuple builds.
texinfo_documents = [
    (master_doc, 'pydantic', 'pydantic Documentation', author, 'pydantic'),
]

# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []

# If false, no module index is generated.
#
# texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# silence warnings about the externally-hosted badge images in the docs
suppress_warnings = ['image.nonlocal_uri']
362 |
--------------------------------------------------------------------------------
/tests/test_types.py:
--------------------------------------------------------------------------------
1 | import os
2 | from collections import OrderedDict
3 | from datetime import date, datetime, time, timedelta
4 | from enum import Enum, IntEnum
5 |
6 | import pytest
7 |
8 | from pydantic import (DSN, BaseModel, EmailStr, NameEmail, NegativeInt, PositiveInt, PyObject, ValidationError, conint,
9 | constr)
10 |
11 |
class ConStringModel(BaseModel):
    # string field capped at 10 characters, with a default
    v: constr(max_length=10) = 'foobar'
14 |
15 |
def test_constrained_str_good():
    """A value within max_length passes through unchanged."""
    assert ConStringModel(v='short').v == 'short'
19 |
20 |
def test_constrained_str_default():
    """Omitting the field falls back to the declared default."""
    assert ConStringModel().v == 'foobar'
24 |
25 |
def test_constrained_str_too_long():
    # exceeding max_length (10) raises, reported via the json() form
    with pytest.raises(ValidationError) as exc_info:
        ConStringModel(v='this is too long')
    assert """\
{
  "v": {
    "error_msg": "length greater than maximum allowed: 10",
    "error_type": "ValueError",
    "index": null,
    "track": "ConstrainedStrValue"
  }
}""" == exc_info.value.json(2)
38 |
39 |
class DsnModel(BaseModel):
    """Model whose `dsn` field is computed from the other db_* fields when
    not supplied explicitly (see the test_dsn_* tests below)."""
    db_name = 'foobar'
    db_user = 'postgres'
    db_password: str = None
    db_host = 'localhost'
    db_port = '5432'
    db_driver = 'postgres'
    db_query: dict = None
    dsn: DSN = None
49 |
50 |
def test_dsn_compute():
    """With no explicit dsn, the DSN is assembled from the db_* defaults."""
    assert DsnModel().dsn == 'postgres://postgres@localhost:5432/foobar'
54 |
55 |
def test_dsn_define():
    """An explicitly supplied dsn wins over the computed one."""
    explicit = 'postgres://postgres@localhost:5432/different'
    assert DsnModel(dsn=explicit).dsn == explicit
59 |
60 |
def test_dsn_pw_host():
    """Password, a host containing ':' and a query dict are all encoded."""
    model = DsnModel(db_password='pword', db_host='before:after', db_query={'v': 1})
    assert model.dsn == 'postgres://postgres:pword@[before:after]:5432/foobar?v=1'
64 |
65 |
def test_dsn_no_driver():
    """A None driver makes DSN computation fail with a clear message."""
    with pytest.raises(ValidationError) as exc_info:
        DsnModel(db_driver=None)
    error_text = str(exc_info.value)
    assert '"db_driver" field may not be missing or None' in error_text
70 |
71 |
class PyObjectModel(BaseModel):
    # PyObject imports the dotted path and stores the resulting object
    module: PyObject = 'os.path'
74 |
75 |
def test_module_import():
    """PyObject resolves a dotted path to the object it names."""
    assert PyObjectModel().module == os.path
    # a path that cannot be imported is rejected with a helpful message
    with pytest.raises(ValidationError) as exc_info:
        PyObjectModel(module='foobar')
    error_text = str(exc_info.value)
    assert '"foobar" doesn\'t look like a module path' in error_text
82 |
83 |
class CheckModel(BaseModel):
    """One field per basic type, used by the default-validator matrix test."""
    bool_check = True
    str_check = 's'
    bytes_check = b's'
    int_check = 1
    float_check = 1.0

    class Config:
        # global limits applied to all str/bytes and numeric fields
        max_anystr_length = 10
        max_number_size = 100
94 |
95 |
# matrix of (field, raw input, expected parsed value or ValidationError)
@pytest.mark.parametrize('field,value,result', [
    ('bool_check', True, True),
    ('bool_check', False, False),
    ('bool_check', None, False),
    ('bool_check', '', False),
    ('bool_check', 1, True),
    ('bool_check', 'TRUE', True),
    ('bool_check', b'TRUE', True),
    ('bool_check', 'true', True),
    ('bool_check', '1', True),
    ('bool_check', '2', False),
    ('bool_check', 2, True),
    ('bool_check', 'on', True),
    ('bool_check', 'yes', True),

    ('str_check', 's', 's'),
    ('str_check', b's', 's'),
    ('str_check', 1, '1'),
    # longer than Config.max_anystr_length (10)
    ('str_check', 'x' * 11, ValidationError),
    ('str_check', b'x' * 11, ValidationError),

    ('bytes_check', 's', b's'),
    ('bytes_check', b's', b's'),
    ('bytes_check', 1, b'1'),
    # longer than Config.max_anystr_length (10)
    ('bytes_check', 'x' * 11, ValidationError),
    ('bytes_check', b'x' * 11, ValidationError),

    ('int_check', 1, 1),
    ('int_check', 1.9, 1),
    ('int_check', '1', 1),
    ('int_check', '1.9', ValidationError),
    ('int_check', b'1', 1),
    ('int_check', 12, 12),
    ('int_check', '12', 12),
    ('int_check', b'12', 12),
    # over Config.max_number_size (100)
    ('int_check', 123, ValidationError),
    ('int_check', '123', ValidationError),
    ('int_check', b'123', ValidationError),

    ('float_check', 1, 1.0),
    ('float_check', 1.0, 1.0),
    ('float_check', '1.0', 1.0),
    ('float_check', '1', 1.0),
    ('float_check', b'1.0', 1.0),
    ('float_check', b'1', 1.0),
    # over Config.max_number_size (100)
    ('float_check', 123, ValidationError),
    ('float_check', '123', ValidationError),
    ('float_check', b'123', ValidationError),
])
def test_default_validators(field, value, result):
    kwargs = {field: value}
    if result == ValidationError:
        # invalid input: model construction must raise
        with pytest.raises(ValidationError):
            CheckModel(**kwargs)
    else:
        assert CheckModel(**kwargs).values[field] == result
152 |
153 |
class DatetimeModel(BaseModel):
    # all four temporal types, every field required
    dt: datetime = ...
    date_: date = ...
    time_: time = ...
    duration: timedelta = ...
159 |
160 |
def test_datetime_successful():
    m = DatetimeModel(
        # ISO-ish string without zero-padded day
        dt='2017-10-5T19:47:07',
        # unix epoch seconds are accepted for dates
        date_=1494012000,
        time_='10:20:30.400',
        # minutes:seconds.fraction
        duration='15:30.0001',
    )
    assert m.dt == datetime(2017, 10, 5, 19, 47, 7)
    assert m.date_ == date(2017, 5, 5)
    assert m.time_ == time(10, 20, 30, 400000)
    assert m.duration == timedelta(minutes=15, seconds=30, microseconds=100)
172 |
173 |
def test_datetime_errors():
    # every field is invalid; all four errors are collected in one exception
    with pytest.raises(ValueError) as exc_info:
        DatetimeModel(
            dt='2017-13-5T19:47:07',
            date_='XX1494012000',
            time_='25:20:30.400',
            duration='15:30.0001 broken',
        )
    assert exc_info.value.message == '4 errors validating input'
    assert """\
{
  "date_": {
    "error_msg": "Invalid date format",
    "error_type": "ValueError",
    "index": null,
    "track": "date"
  },
  "dt": {
    "error_msg": "month must be in 1..12",
    "error_type": "ValueError",
    "index": null,
    "track": "datetime"
  },
  "duration": {
    "error_msg": "Invalid duration format",
    "error_type": "ValueError",
    "index": null,
    "track": "timedelta"
  },
  "time_": {
    "error_msg": "hour must be in 0..23",
    "error_type": "ValueError",
    "index": null,
    "track": "time"
  }
}""" == exc_info.value.json(2)
210 |
211 |
class FruitEnum(str, Enum):
    """str-valued enum used to test enum field coercion."""
    pear = 'pear'
    banana = 'banana'
215 |
216 |
class ToolEnum(IntEnum):
    """int-valued enum used to test enum field coercion."""
    spanner = 1
    wrench = 2
220 |
221 |
class CookingModel(BaseModel):
    # both fields default to the first member of their enum
    fruit: FruitEnum = FruitEnum.pear
    tool: ToolEnum = ToolEnum.spanner
225 |
226 |
def test_enum_successful():
    """Raw values are coerced to enum members; defaults are left alone."""
    m = CookingModel(tool=2)
    assert m.fruit == FruitEnum.pear
    assert m.tool == ToolEnum.wrench
    # fixed: the expected repr had been reduced to '' (angle-bracketed text
    # stripped); repr of an IntEnum member is '<ClassName.member: value>'
    # and can never be the empty string
    assert repr(m.tool) == '<ToolEnum.wrench: 2>'
232 |
233 |
def test_enum_fails():
    # 3 is not a ToolEnum value, so coercion fails
    with pytest.raises(ValueError) as exc_info:
        CookingModel(tool=3)
    assert exc_info.value.message == '1 error validating input'
    assert """\
{
  "tool": {
    "error_msg": "3 is not a valid ToolEnum",
    "error_type": "ValueError",
    "index": null,
    "track": "ToolEnum"
  }
}""" == exc_info.value.json(2)
247 |
248 |
class MoreStringsModel(BaseModel):
    """Constrained and email-flavoured string fields, all required."""
    str_regex: constr(regex=r'^xxx\d{3}$') = ...
    str_min_length: constr(min_length=5) = ...
    str_curtailed: constr(curtail_length=5) = ...
    str_email: EmailStr = ...
    name_email: NameEmail = ...
255 |
256 |
def test_string_success():
    m = MoreStringsModel(
        str_regex='xxx123',
        str_min_length='12345',
        # curtail_length truncates rather than rejects (asserted below)
        str_curtailed='123456',
        # trailing whitespace is stripped from the email (asserted below)
        str_email='foobar@example.com ',
        # NOTE(review): this literal looks garbled - an angle-bracketed
        # address part (e.g. '<foobar@example.com>') appears to have been
        # stripped; the .email assertion below cannot hold otherwise - confirm
        name_email='foo bar ',
    )
    assert m.str_regex == 'xxx123'
    assert m.str_curtailed == '12345'
    assert m.str_email == 'foobar@example.com'
    # NOTE(review): expected repr looks truncated (angle-bracketed text
    # stripped?) - confirm against NameEmail.__repr__
    assert repr(m.name_email) == '")>'
    assert m.name_email.name == 'foo bar'
    assert m.name_email.email == 'foobar@example.com'
271 |
272 |
def test_string_fails():
    # every field except str_curtailed fails; errors are keyed by field name
    with pytest.raises(ValidationError) as exc_info:
        MoreStringsModel(
            str_regex='xxx123 ',
            str_min_length='1234',
            str_curtailed='123',  # doesn't fail
            str_email='foobar\n@example.com',
            name_email='foobar @example.com',
        )
    assert exc_info.value.message == '4 errors validating input'
    assert """\
{
  "name_email": {
    "error_msg": "Email address is not valid",
    "error_type": "ValueError",
    "index": null,
    "track": "NameEmail"
  },
  "str_email": {
    "error_msg": "Email address is not valid",
    "error_type": "ValueError",
    "index": null,
    "track": "EmailStr"
  },
  "str_min_length": {
    "error_msg": "length less than minimum allowed: 5",
    "error_type": "ValueError",
    "index": null,
    "track": "ConstrainedStrValue"
  },
  "str_regex": {
    "error_msg": "string does not match regex \\"^xxx\\\\d{3}$\\"",
    "error_type": "ValueError",
    "index": null,
    "track": "ConstrainedStrValue"
  }
}""" == exc_info.value.json(2)
310 |
311 |
class ListDictTupleModel(BaseModel):
    """Optional fields covering the plain (unparameterised) container types."""
    a: dict = None
    b: list = None
    c: OrderedDict = None
    d: tuple = None
317 |
318 |
def test_dict():
    """A dict field accepts mappings and key/value pair sequences."""
    assert ListDictTupleModel(a={1: 10, 2: 20}).a == {1: 10, 2: 20}
    assert ListDictTupleModel(a=[(1, 2), (3, 4)]).a == {1: 2, 3: 4}
    # a flat list cannot be interpreted as key/value pairs
    with pytest.raises(ValidationError) as exc_info:
        ListDictTupleModel(a=[1, 2, 3])
    error_text = str(exc_info.value)
    assert 'cannot convert dictionary update sequence element #0 to a sequence' in error_text
325 |
326 |
def test_list():
    """A list field coerces any iterable - str, generator - to a list."""
    model = ListDictTupleModel(b=[1, 2, '3'])
    assert model.a is None
    assert model.b == [1, 2, '3']
    assert ListDictTupleModel(b='xyz').b == ['x', 'y', 'z']
    squares = (i**2 for i in range(5))
    assert ListDictTupleModel(b=squares).b == [0, 1, 4, 9, 16]
    # non-iterables are rejected
    with pytest.raises(ValidationError) as exc_info:
        ListDictTupleModel(b=1)
    assert "'int' object is not iterable" in str(exc_info.value)
336 |
337 |
def test_ordered_dict():
    """An OrderedDict field accepts OrderedDicts, plain dicts and pair lists."""
    assert ListDictTupleModel(c=OrderedDict([(1, 10), (2, 20)])).c == OrderedDict([(1, 10), (2, 20)])
    # plain-dict input may come out in either order
    accepted = (OrderedDict([(1, 10), (2, 20)]), OrderedDict([(2, 20), (1, 10)]))
    assert ListDictTupleModel(c={1: 10, 2: 20}).c in accepted
    assert ListDictTupleModel(c=[(1, 2), (3, 4)]).c == OrderedDict([(1, 2), (3, 4)])
    # a flat list cannot be interpreted as key/value pairs
    with pytest.raises(ValidationError) as exc_info:
        ListDictTupleModel(c=[1, 2, 3])
    assert "'int' object is not iterable" in str(exc_info.value)
345 |
346 |
def test_tuple():
    """A tuple field coerces any iterable and round-trips via .values."""
    model = ListDictTupleModel(d=(1, 2, '3'))
    assert model.a is None
    assert model.d == (1, 2, '3')
    assert model.values == {'a': None, 'b': None, 'c': None, 'd': (1, 2, '3')}
    assert ListDictTupleModel(d='xyz').d == ('x', 'y', 'z')
    squares = (i**2 for i in range(5))
    assert ListDictTupleModel(d=squares).d == (0, 1, 4, 9, 16)
    # non-iterables are rejected
    with pytest.raises(ValidationError) as exc_info:
        ListDictTupleModel(d=1)
    assert "'int' object is not iterable" in str(exc_info.value)
357 |
358 |
class IntModel(BaseModel):
    # all optional; conint(gt=4, lt=10) declares exclusive bounds
    a: PositiveInt = None
    b: NegativeInt = None
    c: conint(gt=4, lt=10) = None
363 |
364 |
def test_int_validation():
    """Constrained ints accept in-range values and reject out-of-range ones."""
    assert IntModel(a=5, b=-5, c=5) == {'a': 5, 'b': -5, 'c': 5}
    with pytest.raises(ValidationError) as exc_info:
        IntModel(a=-5, b=5, c=-5)
    # all three fields fail at once
    assert exc_info.value.message == '3 errors validating input'
371 |
372 |
def test_set():
    """A set field coerces lists and passes sets through unchanged."""
    class SetModel(BaseModel):
        v: set = ...

    model = SetModel(v=[1, 2, 3])
    assert model.v == {1, 2, 3}
    assert model.values == {'v': {1, 2, 3}}
    assert SetModel(v={'a', 'b', 'c'}).v == {'a', 'b', 'c'}
381 |
--------------------------------------------------------------------------------