├── .github
│   └── workflows
│       ├── pythonpackage.yml
│       └── pythonpublish.yml
├── .gitignore
├── CONTRIBUTING.md
├── LICENSE.md
├── MANIFEST.in
├── README.md
├── build.sh
├── ddt.py
├── docs
│   ├── Makefile
│   ├── api.rst
│   ├── conf.py
│   ├── example.rst
│   ├── faq.rst
│   └── index.rst
├── release.sh
├── requirements
│   ├── build.txt
│   ├── release.txt
│   └── test.txt
├── rtdocs.sh
├── setup.cfg
├── setup.py
├── test
│   ├── __init__.py
│   ├── data
│   │   ├── test_custom_yaml_loader.yaml
│   │   ├── test_data_dict.json
│   │   ├── test_data_dict.yaml
│   │   ├── test_data_dict_dict.json
│   │   ├── test_data_dict_dict.yaml
│   │   ├── test_data_list.json
│   │   ├── test_data_list.yaml
│   │   └── test_functional_custom_tags.yaml
│   ├── mycode.py
│   ├── test_async.py
│   ├── test_example.py
│   ├── test_functional.py
│   └── test_named_data.py
└── tox.ini
/.github/workflows/pythonpackage.yml:
--------------------------------------------------------------------------------
1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions
2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
3 |
4 | name: Run Tests
5 |
6 | on:
7 |   push:
8 |     branches: [ master ]
9 |   pull_request:
10 |     branches: [ master ]
11 |
12 | jobs:
13 |   build:
14 |     runs-on: ubuntu-20.04
15 |     timeout-minutes: 5
16 |     strategy:
17 |       matrix:
18 |         python-version: [3.6, 3.7, 3.8, 3.9, '3.10']
19 |     steps:
20 |     - uses: actions/checkout@v2
21 |     - name: Set up Python ${{ matrix.python-version }}
22 |       uses: actions/setup-python@v4
23 |       with:
24 |         python-version: ${{ matrix.python-version }}
25 |     - name: Install dependencies
26 |       run: |
27 |         python -m pip install --upgrade pip
28 |         pip install tox tox-gh-actions
29 |         pip install -r requirements/test.txt
30 |     - name: Test with pytest
31 |       run: |
32 |         tox
33 |
--------------------------------------------------------------------------------
/.github/workflows/pythonpublish.yml:
--------------------------------------------------------------------------------
1 | # This workflow will upload a Python Package using Twine when a release is created
2 | # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
3 |
4 | name: Upload Python Package
5 |
6 | on:
7 |   release:
8 |     types: [created]
9 |
10 | jobs:
11 |   deploy:
12 |
13 |     runs-on: ubuntu-latest
14 |
15 |     steps:
16 |     - uses: actions/checkout@v2
17 |     - name: Set up Python
18 |       uses: actions/setup-python@v1
19 |       with:
20 |         python-version: '3.x'
21 |     - name: Install dependencies
22 |       run: |
23 |         python -m pip install --upgrade pip
24 |         pip install -r requirements/release.txt
25 |     - name: Build and publish
26 |       env:
27 |         TWINE_USERNAME: __token__
28 |         TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
29 |       run: |
30 |         python setup.py sdist bdist_wheel
31 |         twine upload dist/*
32 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.orig
2 | *.pyc
3 | *.swp
4 | build/
5 | dist/
6 | ddt.egg-info/
7 | .coverage
8 | /cover/
9 | /docs/_build/
10 | .tox
11 | .ropeproject
12 | venv/
13 | htmlcov/
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to DDT
2 |
3 | ## Guidelines
4 |
5 | We'll be happy if you want to contribute to the improvement of `ddt`.
6 |
7 | Code contributions will take the form of pull requests to
8 | [the github repo](https://github.com/datadriventests/ddt).
9 |
10 | Your PRs are more likely to be merged quickly if:
11 |
12 | - They adhere to coding conventions in the repo (PEP8)
13 | - They include tests
14 |
15 | ## Building
16 |
17 | PRs to `ddt` are built automatically by GitHub Actions on Python 3.6 through 3.10.
18 |
19 | If you want to build `ddt` locally, the simplest way is to use `tox`:
20 |
21 | ```
22 | pip install tox
23 | tox
24 | ```
25 |
26 | This will run the tests on the various Python releases configured in
27 | `tox.ini` (as long as they are installed on your computer), run `flake8`,
28 | and build the Sphinx documentation.
29 |
30 | Alternatively, if you only want to run the tests on your active version of
31 | Python, create a virtual environment and run:
32 |
33 | ```
34 | pip install -r requirements/build.txt
35 | ./build.sh
36 | ```
37 |
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | Copyright © 2015 Carles Barrobés and additional contributors.
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy of
4 | this software and associated documentation files (the “Software”), to deal in
5 | the Software without restriction, including without limitation the rights to
6 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
7 | of the Software, and to permit persons to whom the Software is furnished to do
8 | so, subject to the following conditions:
9 |
10 | The above copyright notice and this permission notice shall be included in all
11 | copies or substantial portions of the Software.
12 |
13 | THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19 | SOFTWARE.
20 |
21 | ([MIT License](http://mit-license.org/))
22 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include *.md
2 | recursive-include test *.py *.json *.yaml
3 | include MANIFEST.in
4 | include LICENSE.md
5 | include tox.ini
6 | global-exclude *.py[cod] __pycache__
7 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | 
2 | [](https://codecov.io/github/datadriventests/ddt)
3 |
4 | [](https://pypi.python.org/pypi/ddt)
5 | [](https://pypi.python.org/pypi/ddt)
6 |
7 | DDT (Data-Driven Tests) allows you to multiply one test case
8 | by running it with different test data, and make it appear as
9 | multiple test cases.
10 |
11 | # Installation
12 |
13 |
14 | ```pip install ddt```
15 |
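A minimal sketch of what a data-driven test looks like (the class and method
names here are just illustrative):

```python
import unittest

from ddt import ddt, data, unpack


@ddt
class AdditionTestCase(unittest.TestCase):

    @data((1, 2, 3), (10, 5, 15))
    @unpack
    def test_addition(self, first, second, expected):
        # One test method is generated per data tuple.
        self.assertEqual(first + second, expected)
```
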
16 | Check out [the documentation](http://ddt.readthedocs.org/) for more details.
17 |
18 | See [Contributing](CONTRIBUTING.md) if you plan to contribute to `ddt`,
19 | and [License](LICENSE.md) if you plan to use it.
20 |
--------------------------------------------------------------------------------
/build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | pytest --cov=ddt --cov-report html
3 | flake8 ddt.py test || echo "Flake8 errors"
4 | (cd docs; make html)
5 |
--------------------------------------------------------------------------------
/ddt.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # This file is a part of DDT (https://github.com/datadriventests/ddt)
3 | # Copyright 2012-2015 Carles Barrobés and DDT contributors
4 | # For the exact contribution history, see the git revision log.
5 | # DDT is licensed under the MIT License, included in
6 | # https://github.com/datadriventests/ddt/blob/master/LICENSE.md
7 |
8 | import codecs
9 | import inspect
10 | import json
11 | import os
12 | import re
13 | from enum import Enum, unique
14 | from functools import wraps
15 |
16 | try:
17 |     import yaml
18 | except ImportError: # pragma: no cover
19 |     _have_yaml = False
20 | else:
21 |     _have_yaml = True
22 |
23 | from collections.abc import Sequence
24 |
25 |
26 | __version__ = '1.7.2'
27 |
28 | # These attributes will not conflict with any real python attribute
29 | # They are added to the decorated test method and processed later
30 | # by the `ddt` class decorator.
31 |
32 | DATA_ATTR = '%values' # store the data the test must run with
33 | FILE_ATTR = '%file_path' # store the path to JSON file
34 | YAML_LOADER_ATTR = '%yaml_loader' # store custom yaml loader for serialization
35 | UNPACK_ATTR = '%unpack' # remember that we have to unpack values
36 | INDEX_LEN = '%index_len' # store the index length of the data
37 |
38 |
39 | # These are helper classes for @named_data that allow ddt tests to have meaningful names.
40 | class _NamedDataList(list):
41 |     def __init__(self, name, *args):
42 |         super(_NamedDataList, self).__init__(args)
43 |         self.name = name
44 |
45 |     def __str__(self):
46 |         return str(self.name)
47 |
48 |
49 | class _NamedDataDict(dict):
50 |     def __init__(self, **kwargs):
51 |         if "name" not in kwargs.keys():
52 |             raise KeyError("@named_data expects a dictionary with a 'name' key.")
53 |         self.name = kwargs.pop('name')
54 |         super(_NamedDataDict, self).__init__(kwargs)
55 |
56 |     def __str__(self):
57 |         return str(self.name)
58 |
59 |
60 | trivial_types = (type(None), bool, int, float, _NamedDataList, _NamedDataDict)
61 | try:
62 |     trivial_types += (basestring, )
63 | except NameError:
64 |     trivial_types += (str, )
65 |
66 |
67 | @unique
68 | class TestNameFormat(Enum):
69 | """
70 | An enum to configure how ``mk_test_name()`` to compose a test name. Given
71 | the following example:
72 |
73 | .. code-block:: python
74 |
75 | @data("a", "b")
76 | def testSomething(self, value):
77 | ...
78 |
79 | if using just ``@ddt`` or together with ``DEFAULT``:
80 |
81 | * ``testSomething_1_a``
82 | * ``testSomething_2_b``
83 |
84 | if using ``INDEX_ONLY``:
85 |
86 | * ``testSomething_1``
87 | * ``testSomething_2``
88 |
89 | """
90 | DEFAULT = 0
91 | INDEX_ONLY = 1
92 |
93 |
94 | def is_trivial(value):
95 |     if isinstance(value, trivial_types):
96 |         return True
97 |     elif isinstance(value, (list, tuple)):
98 |         return all(map(is_trivial, value))
99 |     return False
100 |
101 |
102 | def unpack(func):
103 | """
104 | Method decorator to add unpack feature.
105 |
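    A short sketch, mirroring the tuple examples in this repository's tests:

    .. code-block:: python

        @data((3, 2), (4, 3), (5, 3))
        @unpack
        def test_tuples_extracted_into_arguments(self, first_value, second_value):
            self.assertTrue(first_value > second_value)
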
106 | """
107 | setattr(func, UNPACK_ATTR, True)
108 | return func
109 |
110 |
111 | def data(*values):
112 | """
113 | Method decorator to add to your test methods.
114 |
115 | Should be added to methods of instances of ``unittest.TestCase``.
116 |
117 | """
118 | return idata(values)
119 |
120 |
121 | def idata(iterable, index_len=None):
122 | """
123 | Method decorator to add to your test methods.
124 |
125 | Should be added to methods of instances of ``unittest.TestCase``.
126 |
127 | :param iterable: iterable of the values to provide to the test function.
128 | :param index_len: an optional integer specifying the width to zero-pad the
129 | test identifier indices to. If not provided, this will add the fewest
130 | zeros necessary to make all identifiers the same length.
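
    A short sketch, as used in this repository's tests:

    .. code-block:: python

        @idata(itertools.product([0, 1, 2], [3, 4, 5]))
        def test_iterable_argument(self, value):
            first_value, second_value = value
            ...
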
131 | """
132 | if index_len is None:
133 | # Avoid consuming a one-time-use generator.
134 | iterable = tuple(iterable)
135 | index_len = len(str(len(iterable)))
136 |
137 | def wrapper(func):
138 | setattr(func, DATA_ATTR, iterable)
139 | setattr(func, INDEX_LEN, index_len)
140 | return func
141 |
142 | return wrapper
143 |
144 |
145 | def file_data(value, yaml_loader=None):
146 | """
147 | Method decorator to add to your test methods.
148 |
149 | Should be added to methods of instances of ``unittest.TestCase``.
150 |
151 | ``value`` should be a path relative to the directory of the file
152 | containing the decorated ``unittest.TestCase``. The file
153 | should contain JSON encoded data, that can either be a list or a
154 | dict.
155 |
156 | In case of a list, each value in the list will correspond to one
157 | test case, and the value will be concatenated to the test method
158 | name.
159 |
160 | In case of a dict, keys will be used as suffixes to the name of the
161 | test case, and values will be fed as test data.
162 |
163 | ``yaml_loader`` can be used to customize yaml deserialization.
164 | The default is ``None``, which results in using the ``yaml.safe_load``
165 | method.
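
    A short sketch (the path is relative to the test module, as in this
    repository's tests):

    .. code-block:: python

        @file_data('data/test_data_dict.json')
        def test_file_data_json_dict(self, value):
            ...
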
166 | """
167 | def wrapper(func):
168 | setattr(func, FILE_ATTR, value)
169 | if yaml_loader:
170 | setattr(func, YAML_LOADER_ATTR, yaml_loader)
171 | return func
172 | return wrapper
173 |
174 |
175 | def mk_test_name(name, value, index=0, index_len=5, name_fmt=TestNameFormat.DEFAULT):
176 | """
177 | Generate a new name for a test case.
178 |
179 | It will take the original test name and append an ordinal index and a
180 | string representation of the value, and convert the result into a valid
181 | python identifier by replacing extraneous characters with ``_``.
182 |
183 | We avoid doing str(value) if dealing with non-trivial values.
184 | The problem is possible different names with different runs, e.g.
185 | different order of dictionary keys (see PYTHONHASHSEED) or dealing
186 | with mock objects.
187 | Trivial scalar values are passed as is.
188 |
189 | A "trivial" value is a plain scalar, or a tuple or list consisting
190 | only of trivial values.
191 |
192 | The test name format is controlled by enum ``TestNameFormat`` as well. See
193 | the enum documentation for further details.
194 | """
195 |
196 | # Add zeros before index to keep order
197 | index = "{0:0{1}}".format(index + 1, index_len)
198 | if name_fmt is TestNameFormat.INDEX_ONLY or not is_trivial(value):
199 | return "{0}_{1}".format(name, index)
200 | try:
201 | value = str(value)
202 | except UnicodeEncodeError:
203 | # fallback for python2
204 | value = value.encode('ascii', 'backslashreplace')
205 | test_name = "{0}_{1}_{2}".format(name, index, value)
206 | return re.sub(r'\W|^(?=\d)', '_', test_name)
207 |
208 |
209 | def feed_data(func, new_name, test_data_docstring, *args, **kwargs):
210 | """
211 | This internal method decorator feeds the test data item to the test.
212 |
213 | """
214 | if inspect.iscoroutinefunction(func):
215 | @wraps(func)
216 | async def wrapper(self):
217 | return await func(self, *args, **kwargs)
218 | else:
219 | @wraps(func)
220 | def wrapper(self):
221 | return func(self, *args, **kwargs)
222 |
223 | wrapper.__name__ = new_name
224 | wrapper.__wrapped__ = func
225 | # set docstring if exists
226 | if test_data_docstring is not None:
227 | wrapper.__doc__ = test_data_docstring
228 | else:
229 | # Try to call format on the docstring
230 | if func.__doc__:
231 | try:
232 | wrapper.__doc__ = func.__doc__.format(*args, **kwargs)
233 | except (IndexError, KeyError):
234 | # Maybe the user has added some of the formating strings
235 | # unintentionally in the docstring. Do not raise an exception
236 | # as it could be that user is not aware of the
237 | # formating feature.
238 | pass
239 | return wrapper
240 |
241 |
242 | def add_test(cls, test_name, test_docstring, func, *args, **kwargs):
243 | """
244 | Add a test case to this class.
245 |
246 | The test will be based on an existing function but will give it a new
247 | name.
248 |
249 | """
250 | setattr(cls, test_name, feed_data(func, test_name, test_docstring,
251 | *args, **kwargs))
252 |
253 |
254 | def process_file_data(cls, name, func, file_attr):
255 | """
256 | Process the parameter in the `file_data` decorator.
257 | """
258 | cls_path = os.path.abspath(inspect.getsourcefile(cls))
259 | data_file_path = os.path.join(os.path.dirname(cls_path), file_attr)
260 |
261 | def create_error_func(message): # pylint: disable-msg=W0613
262 | def func(*args):
263 | raise ValueError(message % file_attr)
264 | return func
265 |
266 | # If file does not exist, provide an error function instead
267 | if not os.path.exists(data_file_path):
268 | test_name = mk_test_name(name, "error")
269 | test_docstring = """Error!"""
270 | add_test(cls, test_name, test_docstring,
271 | create_error_func("%s does not exist"), None)
272 | return
273 |
274 | _is_yaml_file = data_file_path.endswith((".yml", ".yaml"))
275 |
276 | # Don't have YAML but want to use YAML file.
277 | if _is_yaml_file and not _have_yaml:
278 | test_name = mk_test_name(name, "error")
279 | test_docstring = """Error!"""
280 | add_test(
281 | cls,
282 | test_name,
283 | test_docstring,
284 | create_error_func("%s is a YAML file, please install PyYAML"),
285 | None
286 | )
287 | return
288 |
289 | with codecs.open(data_file_path, 'r', 'utf-8') as f:
290 | # Load the data from YAML or JSON
291 | if _is_yaml_file:
292 | if hasattr(func, YAML_LOADER_ATTR):
293 | yaml_loader = getattr(func, YAML_LOADER_ATTR)
294 | data = yaml.load(f, Loader=yaml_loader)
295 | else:
296 | data = yaml.safe_load(f)
297 | else:
298 | data = json.load(f)
299 |
300 | _add_tests_from_data(cls, name, func, data)
301 |
302 |
303 | def _add_tests_from_data(cls, name, func, data):
304 | """
305 | Add tests from data loaded from the data file into the class
306 | """
307 | index_len = len(str(len(data)))
308 | for i, elem in enumerate(data):
309 | if isinstance(data, dict):
310 | key, value = elem, data[elem]
311 | test_name = mk_test_name(name, key, i, index_len)
312 | elif isinstance(data, list):
313 | value = elem
314 | test_name = mk_test_name(name, value, i, index_len)
315 | if isinstance(value, dict):
316 | add_test(cls, test_name, test_name, func, **value)
317 | else:
318 | add_test(cls, test_name, test_name, func, value)
319 |
320 |
321 | def _is_primitive(obj):
322 | """Finds out if the obj is a "primitive". It is somewhat hacky but it works.
323 | """
324 | return not hasattr(obj, '__dict__')
325 |
326 |
327 | def _get_test_data_docstring(func, value):
328 | """Returns a docstring based on the following resolution strategy:
329 | 1. Passed value is not a "primitive" and has a docstring, then use it.
330 | 2. In all other cases return None, i.e the test name is used.
331 | """
332 | if not _is_primitive(value) and value.__doc__:
333 | return value.__doc__
334 | else:
335 | return None
336 |
337 |
338 | def ddt(arg=None, **kwargs):
339 | """
340 | Class decorator for subclasses of ``unittest.TestCase``.
341 |
342 | Apply this decorator to the test case class, and then
343 | decorate test methods with ``@data``.
344 |
345 | For each method decorated with ``@data``, this will effectively create as
346 | many methods as data items are passed as parameters to ``@data``.
347 |
348 | The names of the test methods follow the pattern
349 | ``original_test_name_{ordinal}_{data}``. ``ordinal`` is the position of the
350 | data argument, starting with 1.
351 |
352 | For data we use a string representation of the data value converted into a
353 | valid python identifier. If ``data.__name__`` exists, we use that instead.
354 |
355 | For each method decorated with ``@file_data('test_data.json')``, the
356 | decorator will try to load the test_data.json file located relative
357 | to the python file containing the method that is decorated. It will,
358 | for each ``test_name`` key create as many methods in the list of values
359 | from the ``data`` key.
360 |
361 | Decorating with the keyword argument ``testNameFormat`` can control the
362 | format of the generated test names. For example:
363 |
364 | - ``@ddt(testNameFormat=TestNameFormat.DEFAULT)`` will be index and values.
365 |
366 | - ``@ddt(testNameFormat=TestNameFormat.INDEX_ONLY)`` will be index only.
367 |
368 | - ``@ddt`` is the same as DEFAULT.
369 |
370 | """
371 | fmt_test_name = kwargs.get("testNameFormat", TestNameFormat.DEFAULT)
372 |
373 |     def wrapper(cls):
374 |         for name, func in list(cls.__dict__.items()):
375 |             if hasattr(func, DATA_ATTR):
376 |                 index_len = getattr(func, INDEX_LEN)
377 |                 for i, v in enumerate(getattr(func, DATA_ATTR)):
378 |                     test_name = mk_test_name(
379 |                         name,
380 |                         getattr(v, "__name__", v),
381 |                         i,
382 |                         index_len,
383 |                         fmt_test_name
384 |                     )
385 |                     test_data_docstring = _get_test_data_docstring(func, v)
386 |                     if hasattr(func, UNPACK_ATTR):
387 |                         if isinstance(v, tuple) or isinstance(v, list):
388 |                             add_test(
389 |                                 cls,
390 |                                 test_name,
391 |                                 test_data_docstring,
392 |                                 func,
393 |                                 *v
394 |                             )
395 |                         else:
396 |                             # unpack dictionary
397 |                             add_test(
398 |                                 cls,
399 |                                 test_name,
400 |                                 test_data_docstring,
401 |                                 func,
402 |                                 **v
403 |                             )
404 |                     else:
405 |                         add_test(cls, test_name, test_data_docstring, func, v)
406 |                 delattr(cls, name)
407 |             elif hasattr(func, FILE_ATTR):
408 |                 file_attr = getattr(func, FILE_ATTR)
409 |                 process_file_data(cls, name, func, file_attr)
410 |                 delattr(cls, name)
411 |         return cls
412 |
413 |     # ``arg`` is the unittest's test class when decorating with ``@ddt`` while
414 |     # it is ``None`` when decorating a test class with ``@ddt(k=v)``.
415 |     return wrapper(arg) if inspect.isclass(arg) else wrapper
416 |
417 |
418 | def named_data(*named_values):
419 | """
420 | This decorator is to allow for meaningful names to be given to tests that would otherwise use @ddt.data and
421 | @ddt.unpack.
422 |
423 | Example of original ddt usage:
424 | @ddt.ddt
425 | class TestExample(TemplateTest):
426 | @ddt.data(
427 | [0, 1],
428 | [10, 11]
429 | )
430 | @ddt.unpack
431 | def test_values(self, value1, value2):
432 | ...
433 |
434 | Example of new usage:
435 | @ddt.ddt
436 | class TestExample(TemplateTest):
437 | @named_data(
438 | ['LabelA', 0, 1],
439 | ['LabelB', 10, 11],
440 | )
441 | def test_values(self, value1, value2):
442 | ...
443 |
444 | Note that @unpack is not used.
445 |
446 | :param Sequence[Any] | dict[Any,Any] named_values: Each named_value should be a Sequence (e.g. list or tuple) with
447 | the name as the first element, or a dictionary with 'name' as one of the keys. The name will be coerced to a
448 | string and all other values will be passed unchanged to the test.
449 | """
450 | values = []
451 | for named_value in named_values:
452 | if not isinstance(named_value, (Sequence, dict)):
453 | raise TypeError(
454 | "@named_data expects a Sequence (list, tuple) or dictionary, and not '{}'.".format(type(named_value))
455 | )
456 |
457 | value = _NamedDataDict(**named_value) if isinstance(named_value, dict) \
458 | else _NamedDataList(named_value[0], *named_value[1:])
459 |
460 | # Remove the __doc__ attribute so @ddt.data doesn't add the NamedData class docstrings to the test name.
461 | value.__doc__ = None
462 |
463 | values.append(value)
464 |
465 | def wrapper(func):
466 | data(*values)(unpack(func))
467 | return func
468 |
469 | return wrapper
470 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | PAPER =
8 | BUILDDIR = _build
9 |
10 | # Internal variables.
11 | PAPEROPT_a4 = -D latex_paper_size=a4
12 | PAPEROPT_letter = -D latex_paper_size=letter
13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
14 | # the i18n builder cannot share the environment and doctrees with the others
15 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
16 |
17 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
18 |
19 | help:
20 | @echo "Please use \`make ' where is one of"
21 | @echo " html to make standalone HTML files"
22 | @echo " dirhtml to make HTML files named index.html in directories"
23 | @echo " singlehtml to make a single large HTML file"
24 | @echo " pickle to make pickle files"
25 | @echo " json to make JSON files"
26 | @echo " htmlhelp to make HTML files and a HTML help project"
27 | @echo " qthelp to make HTML files and a qthelp project"
28 | @echo " devhelp to make HTML files and a Devhelp project"
29 | @echo " epub to make an epub"
30 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
31 | @echo " latexpdf to make LaTeX files and run them through pdflatex"
32 | @echo " text to make text files"
33 | @echo " man to make manual pages"
34 | @echo " texinfo to make Texinfo files"
35 | @echo " info to make Texinfo files and run them through makeinfo"
36 | @echo " gettext to make PO message catalogs"
37 | @echo " changes to make an overview of all changed/added/deprecated items"
38 | @echo " linkcheck to check all external links for integrity"
39 | @echo " doctest to run all doctests embedded in the documentation (if enabled)"
40 |
41 | clean:
42 | -rm -rf $(BUILDDIR)/*
43 |
44 | html:
45 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
46 | @echo
47 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
48 |
49 | dirhtml:
50 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
51 | @echo
52 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
53 |
54 | singlehtml:
55 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
56 | @echo
57 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
58 |
59 | pickle:
60 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
61 | @echo
62 | @echo "Build finished; now you can process the pickle files."
63 |
64 | json:
65 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
66 | @echo
67 | @echo "Build finished; now you can process the JSON files."
68 |
69 | htmlhelp:
70 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
71 | @echo
72 | @echo "Build finished; now you can run HTML Help Workshop with the" \
73 | ".hhp project file in $(BUILDDIR)/htmlhelp."
74 |
75 | qthelp:
76 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
77 | @echo
78 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \
79 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
80 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/DDT.qhcp"
81 | @echo "To view the help file:"
82 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/DDT.qhc"
83 |
84 | devhelp:
85 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
86 | @echo
87 | @echo "Build finished."
88 | @echo "To view the help file:"
89 | @echo "# mkdir -p $$HOME/.local/share/devhelp/DDT"
90 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/DDT"
91 | @echo "# devhelp"
92 |
93 | epub:
94 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
95 | @echo
96 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
97 |
98 | latex:
99 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
100 | @echo
101 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
102 | @echo "Run \`make' in that directory to run these through (pdf)latex" \
103 | "(use \`make latexpdf' here to do that automatically)."
104 |
105 | latexpdf:
106 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
107 | @echo "Running LaTeX files through pdflatex..."
108 | $(MAKE) -C $(BUILDDIR)/latex all-pdf
109 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
110 |
111 | text:
112 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
113 | @echo
114 | @echo "Build finished. The text files are in $(BUILDDIR)/text."
115 |
116 | man:
117 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
118 | @echo
119 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
120 |
121 | texinfo:
122 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
123 | @echo
124 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
125 | @echo "Run \`make' in that directory to run these through makeinfo" \
126 | "(use \`make info' here to do that automatically)."
127 |
128 | info:
129 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
130 | @echo "Running Texinfo files through makeinfo..."
131 | make -C $(BUILDDIR)/texinfo info
132 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
133 |
134 | gettext:
135 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
136 | @echo
137 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
138 |
139 | changes:
140 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
141 | @echo
142 | @echo "The overview file is in $(BUILDDIR)/changes."
143 |
144 | linkcheck:
145 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
146 | @echo
147 | @echo "Link check complete; look for any errors in the above output " \
148 | "or in $(BUILDDIR)/linkcheck/output.txt."
149 |
150 | doctest:
151 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
152 | @echo "Testing of doctests in the sources finished, look at the " \
153 | "results in $(BUILDDIR)/doctest/output.txt."
154 |
--------------------------------------------------------------------------------
/docs/api.rst:
--------------------------------------------------------------------------------
1 | API
2 | ===
3 |
4 | .. automodule:: ddt
5 |    :members:
6 |
7 | .. autofunction:: ddt.named_data
8 |    :noindex:
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # DDT documentation build configuration file, created by
4 | # sphinx-quickstart on Tue Feb 21 23:00:01 2012.
5 | #
6 | # This file is execfile()d with the current directory set to its containing dir.
7 | #
8 | # Note that not all possible configuration values are present in this
9 | # autogenerated file.
10 | #
11 | # All configuration values have a default; values that are commented out
12 | # serve to show the default.
13 |
14 | import sys, os
15 |
16 | # Specific for readthedocs.org
17 | on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
18 |
19 | # If extensions (or modules to document with autodoc) are in another directory,
20 | # add these directories to sys.path here. If the directory is relative to the
21 | # documentation root, use os.path.abspath to make it absolute, like shown here.
22 | #sys.path.insert(0, os.path.abspath('.'))
23 | docs_root = os.path.dirname(__file__)
24 | sys.path.insert(0, os.path.split(docs_root)[0])
25 |
26 | # -- General configuration -----------------------------------------------------
27 |
28 | # If your documentation needs a minimal Sphinx version, state it here.
29 | #needs_sphinx = '1.0'
30 |
31 | # Add any Sphinx extension module names here, as strings. They can be extensions
32 | # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
33 | extensions = ['sphinx.ext.autodoc']
34 | if not on_rtd:
35 |     extensions.append('sphinxcontrib.programoutput')
36 |
37 | # Add any paths that contain templates here, relative to this directory.
38 | templates_path = ['_templates']
39 |
40 | # The suffix of source filenames.
41 | source_suffix = '.rst'
42 |
43 | # The encoding of source files.
44 | #source_encoding = 'utf-8-sig'
45 |
46 | # The master toctree document.
47 | master_doc = 'index'
48 |
49 | # General information about the project.
50 | project = u'DDT'
51 | # pylint: disable-msg=W0622
52 | # - copyright is a builtin
53 | copyright = u'2012, Carles Barrobés'
54 |
55 | # The version info for the project you're documenting, acts as replacement for
56 | # |version| and |release|, also used in various other places throughout the
57 | # built documents.
58 |
59 | from ddt import __version__
60 | # The short X.Y version.
61 | version = __version__
62 | # The full version, including alpha/beta/rc tags.
63 | release = __version__
64 |
65 | # The language for content autogenerated by Sphinx. Refer to documentation
66 | # for a list of supported languages.
67 | #language = None
68 |
69 | # There are two options for replacing |today|: either, you set today to some
70 | # non-false value, then it is used:
71 | #today = ''
72 | # Else, today_fmt is used as the format for a strftime call.
73 | #today_fmt = '%B %d, %Y'
74 |
75 | # List of patterns, relative to source directory, that match files and
76 | # directories to ignore when looking for source files.
77 | exclude_patterns = ['_build']
78 |
79 | # The reST default role (used for this markup: `text`) to use for all documents.
80 | #default_role = None
81 |
82 | # If true, '()' will be appended to :func: etc. cross-reference text.
83 | #add_function_parentheses = True
84 |
85 | # If true, the current module name will be prepended to all description
86 | # unit titles (such as .. function::).
87 | #add_module_names = True
88 |
89 | # If true, sectionauthor and moduleauthor directives will be shown in the
90 | # output. They are ignored by default.
91 | #show_authors = False
92 |
93 | # The name of the Pygments (syntax highlighting) style to use.
94 | pygments_style = 'sphinx'
95 |
96 | # A list of ignored prefixes for module index sorting.
97 | #modindex_common_prefix = []
98 |
99 |
100 | # -- Options for HTML output ---------------------------------------------------
101 |
102 | # The theme to use for HTML and HTML Help pages. See the documentation for
103 | # a list of builtin themes.
104 | if on_rtd:
105 |     html_theme = 'default'
106 | else:
107 |     html_theme = 'sphinxdoc'
108 |
109 | # Theme options are theme-specific and customize the look and feel of a theme
110 | # further. For a list of options available for each theme, see the
111 | # documentation.
112 | #html_theme_options = {}
113 |
114 | # Add any paths that contain custom themes here, relative to this directory.
115 | #html_theme_path = []
116 |
117 | # The name for this set of Sphinx documents. If None, it defaults to
118 | # " v documentation".
119 | #html_title = None
120 |
121 | # A shorter title for the navigation bar. Default is the same as html_title.
122 | #html_short_title = None
123 |
124 | # The name of an image file (relative to this directory) to place at the top
125 | # of the sidebar.
126 | #html_logo = None
127 |
128 | # The name of an image file (within the static path) to use as favicon of the
129 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
130 | # pixels large.
131 | #html_favicon = None
132 |
133 | # Add any paths that contain custom static files (such as style sheets) here,
134 | # relative to this directory. They are copied after the builtin static files,
135 | # so a file named "default.css" will overwrite the builtin "default.css".
136 | html_static_path = ['_static']
137 |
138 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
139 | # using the given strftime format.
140 | #html_last_updated_fmt = '%b %d, %Y'
141 |
142 | # If true, SmartyPants will be used to convert quotes and dashes to
143 | # typographically correct entities.
144 | #html_use_smartypants = True
145 |
146 | # Custom sidebar templates, maps document names to template names.
147 | #html_sidebars = {}
148 |
149 | # Additional templates that should be rendered to pages, maps page names to
150 | # template names.
151 | #html_additional_pages = {}
152 |
153 | # If false, no module index is generated.
154 | #html_domain_indices = True
155 |
156 | # If false, no index is generated.
157 | #html_use_index = True
158 |
159 | # If true, the index is split into individual pages for each letter.
160 | #html_split_index = False
161 |
162 | # If true, links to the reST sources are added to the pages.
163 | #html_show_sourcelink = True
164 |
165 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
166 | #html_show_sphinx = True
167 |
168 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
169 | #html_show_copyright = True
170 |
171 | # If true, an OpenSearch description file will be output, and all pages will
172 | # contain a <link> tag referring to it. The value of this option must be the
173 | # base URL from which the finished HTML is served.
174 | #html_use_opensearch = ''
175 |
176 | # This is the file name suffix for HTML files (e.g. ".xhtml").
177 | #html_file_suffix = None
178 |
179 | # Output file base name for HTML help builder.
180 | htmlhelp_basename = 'DDTdoc'
181 |
182 |
183 | # -- Options for LaTeX output --------------------------------------------------
184 |
185 | latex_elements = {
186 | # The paper size ('letterpaper' or 'a4paper').
187 | #'papersize': 'letterpaper',
188 |
189 | # The font size ('10pt', '11pt' or '12pt').
190 | #'pointsize': '10pt',
191 |
192 | # Additional stuff for the LaTeX preamble.
193 | #'preamble': '',
194 | }
195 |
196 | # Grouping the document tree into LaTeX files. List of tuples
197 | # (source start file, target name, title, author, documentclass [howto/manual]).
198 | latex_documents = [
199 | ('index', 'DDT.tex', u'DDT Documentation',
200 | u'Carles Barrobés', 'manual'),
201 | ]
202 |
203 | # The name of an image file (relative to this directory) to place at the top of
204 | # the title page.
205 | #latex_logo = None
206 |
207 | # For "manual" documents, if this is true, then toplevel headings are parts,
208 | # not chapters.
209 | #latex_use_parts = False
210 |
211 | # If true, show page references after internal links.
212 | #latex_show_pagerefs = False
213 |
214 | # If true, show URL addresses after external links.
215 | #latex_show_urls = False
216 |
217 | # Documents to append as an appendix to all manuals.
218 | #latex_appendices = []
219 |
220 | # If false, no module index is generated.
221 | #latex_domain_indices = True
222 |
223 |
224 | # -- Options for manual page output --------------------------------------------
225 |
226 | # One entry per manual page. List of tuples
227 | # (source start file, name, description, authors, manual section).
228 | man_pages = [
229 | ('index', 'ddt', u'DDT Documentation',
230 | [u'Carles Barrobés'], 1)
231 | ]
232 |
233 | # If true, show URL addresses after external links.
234 | #man_show_urls = False
235 |
236 |
237 | # -- Options for Texinfo output ------------------------------------------------
238 |
239 | # Grouping the document tree into Texinfo files. List of tuples
240 | # (source start file, target name, title, author,
241 | # dir menu entry, description, category)
242 | texinfo_documents = [
243 | ('index', 'DDT', u'DDT Documentation',
244 | u'Carles Barrobés', 'DDT', 'One line description of project.',
245 | 'Miscellaneous'),
246 | ]
247 |
248 | # Documents to append as an appendix to all manuals.
249 | #texinfo_appendices = []
250 |
251 | # If false, no module index is generated.
252 | #texinfo_domain_indices = True
253 |
254 | # How to display URL addresses: 'footnote', 'no', or 'inline'.
255 | #texinfo_show_urls = 'footnote'
256 |
--------------------------------------------------------------------------------
/docs/example.rst:
--------------------------------------------------------------------------------
1 | Example usage
2 | =============
3 |
4 | DDT consists of a class decorator ``ddt`` (for your ``TestCase`` subclass)
5 | and two method decorators (for your tests that want to be multiplied):
6 |
7 | * ``data``: contains as many arguments as values you want to feed to the test.
8 | * ``file_data``: will load test data from a JSON or YAML file.
9 |
10 | .. note::
11 |
12 |     Only files ending with ".yml" and ".yaml" are loaded as YAML files. All
13 |     other files are loaded as JSON files.
14 |
15 | Normally each value within ``data`` will be passed as a single argument to
16 | your test method. If these values are e.g. tuples, you will have to unpack them
17 | inside your test. Alternatively, you can use an additional decorator,
18 | ``unpack``, that will automatically unpack tuples and lists into multiple
19 | arguments, and dictionaries into multiple keyword arguments. See examples
20 | below.
21 |
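For instance, a minimal sketch of ``unpack`` (the method name here is just
illustrative)::

    @data((3, 2), (4, 3))
    @unpack
    def test_greater(self, first, second):
        self.assertGreater(first, second)
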
22 | This allows you to write your tests as:
23 |
24 | .. literalinclude:: ../test/test_example.py
25 |    :language: python
26 |
27 | Where ``test_data_dict_dict.json``:
28 |
29 | .. literalinclude:: ../test/data/test_data_dict_dict.json
30 |    :language: javascript
31 |
32 | and ``test_data_dict_dict.yaml``:
33 |
34 | .. literalinclude:: ../test/data/test_data_dict_dict.yaml
35 |    :language: yaml
36 |
37 | and ``test_data_dict.json``:
38 |
39 | .. literalinclude:: ../test/data/test_data_dict.json
40 |    :language: javascript
41 |
42 | and ``test_data_dict.yaml``:
43 |
44 | .. literalinclude:: ../test/data/test_data_dict.yaml
45 |    :language: yaml
46 |
47 | and ``test_data_list.json``:
48 |
49 | .. literalinclude:: ../test/data/test_data_list.json
50 |    :language: javascript
51 |
52 | and ``test_data_list.yaml``:
53 |
54 | .. literalinclude:: ../test/data/test_data_list.yaml
55 |    :language: yaml
56 |
57 | And then run them with your favourite test runner, e.g. if you use pytest::
58 |
59 |     $ pytest test/test_example.py
60 |
61 | ..
62 |    program-output:: pytest ../test/test_example.py
63 |
64 | The number of test cases actually run and reported separately has been
65 | multiplied.
66 |
67 |
68 | DDT will try to give the new test cases meaningful names by converting the
69 | data values to valid python identifiers.
70 |
71 |
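For instance, under the default format the ``@data(3, 4, 12, 23)`` test shown
above would produce names roughly like the following (the exact index padding
depends on the number of data items)::

    test_larger_than_two_1_3
    test_larger_than_two_2_4
    test_larger_than_two_3_12
    test_larger_than_two_4_23
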
72 | .. note::
73 |
74 |     Python 2.7.3 introduced *hash randomization* which is by default
75 |     enabled on Python 3.3 and later. DDT's default mechanism to
76 |     generate meaningful test names will **not** use the test data value
77 |     as part of the name for complex types if hash randomization is
78 |     enabled.
79 |
80 |     You can disable hash randomization by setting the
81 |     ``PYTHONHASHSEED`` environment variable to a fixed value before
82 |     running tests (``export PYTHONHASHSEED=1`` for example).
83 |
--------------------------------------------------------------------------------
/docs/faq.rst:
--------------------------------------------------------------------------------
1 | Known Issues and FAQ
2 | ====================
3 |
4 | Docstring Handling
5 | ------------------
6 |
7 | If one of the passed data objects has a docstring, the resulting testcase borrows it.
8 |
9 | .. code-block:: python
10 |
11 |     d1 = Dataobj()
12 |     d1.__doc__ = """This is a new docstring"""
13 |
14 |     d2 = Dataobj()
15 |
16 |     @data(d1, d2)
17 |     def test_something(self, value):
18 |         """This is an old docstring"""
19 |         return value
20 |
21 |
22 | The first of the resulting test cases will have ``"""This is a new docstring"""`` as its docstring and the second will
23 | keep its old one (``"""This is an old docstring"""``).
24 |
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | Welcome to DDT's documentation!
2 | ===============================
3 |
4 | DDT (Data-Driven Tests) allows you to multiply one test case
5 | by running it with different test data, and make it appear as
6 | multiple test cases.
7 |
8 | You can find (and fork) the project on Github_.
9 |
10 | DDT should work on any recent Python 3, but we only officially test it on
11 | versions 3.6 through 3.10.
12 |
13 | Contents:
14 |
15 | .. toctree::
16 |    :maxdepth: 2
17 |
18 |    example
19 |    faq
20 |    api
21 |
22 | Indices and tables
23 | ==================
24 |
25 | * :ref:`genindex`
26 | * :ref:`modindex`
27 | * :ref:`search`
28 |
29 |
30 | .. _Github: https://github.com/datadriventests/ddt
31 |
--------------------------------------------------------------------------------
/release.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | python setup.py sdist bdist_wheel upload
3 |
--------------------------------------------------------------------------------
/requirements/build.txt:
--------------------------------------------------------------------------------
1 | -r test.txt
2 | Sphinx
3 | sphinxcontrib-programoutput
4 | enum34; python_version < '3'
5 |
--------------------------------------------------------------------------------
/requirements/release.txt:
--------------------------------------------------------------------------------
1 | wheel
2 | setuptools
3 | twine
4 | pytest
5 | enum34; python_version < '3'
6 |
--------------------------------------------------------------------------------
/requirements/test.txt:
--------------------------------------------------------------------------------
1 | aiounittest
2 | codecov
3 | coverage
4 | flake8
5 | pytest
6 | pytest-cov
7 | six>=1.4.0
8 | PyYAML
9 | mock; python_version < '3.3'
10 |
--------------------------------------------------------------------------------
/rtdocs.sh:
--------------------------------------------------------------------------------
1 | curl --data '' http://readthedocs.org/build/ddt
2 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [bdist_wheel]
2 | universal = 1
3 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # # coding: utf-8
3 |
4 | from setuptools import setup
5 | from ddt import __version__
6 |
7 | setup(
8 |     name='ddt',
9 |     description='Data-Driven/Decorated Tests',
10 |     long_description='A library to multiply test cases',
11 |     version=__version__,
12 |     author='Carles Barrobés',
13 |     author_email='carles@barrobes.com',
14 |     url='https://github.com/datadriventests/ddt',
15 |     py_modules=['ddt'],
16 |     classifiers=[
17 |         'Development Status :: 4 - Beta',
18 |         'Intended Audience :: Developers',
19 |         'License :: OSI Approved :: MIT License',
20 |         'Operating System :: OS Independent',
21 |         'Programming Language :: Python',
22 |         'Programming Language :: Python :: 3',
23 |         'Programming Language :: Python :: 3.6',
24 |         'Programming Language :: Python :: 3.7',
25 |         'Programming Language :: Python :: 3.8',
26 |         'Programming Language :: Python :: 3.9',
27 |         'Programming Language :: Python :: 3.10',
28 |         'Topic :: Software Development :: Testing',
29 |     ],
30 | )
31 |
--------------------------------------------------------------------------------
/test/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/datadriventests/ddt/5b4fec6152fd6be880c42f01213685a1a835720a/test/__init__.py
--------------------------------------------------------------------------------
/test/data/test_custom_yaml_loader.yaml:
--------------------------------------------------------------------------------
1 | bool:
2 |   instance: !!bool "false"
3 |   expected: false
4 |
5 | str:
6 |   instance: !!str "test"
7 |   expected: test
8 |
9 | int:
10 |   instance: !!int "32"
11 |   expected: 32
12 |
13 | float:
14 |   instance: !!float "3.123"
15 |   expected: 3.123
16 |
17 | python_list:
18 |   instance: !!python/list [1,2,3,4]
19 |   expected:
20 |     - 1
21 |     - 2
22 |     - 3
23 |     - 4
24 |
25 | python_dict:
26 |   instance: !!python/dict
27 |     a: 1
28 |     b: asd
29 |     c: false
30 |   expected:
31 |     a: 1
32 |     b: asd
33 |     c: false
34 |
35 | my_class:
36 |   instance: !!python/object:test.test_example.MyClass
37 |     a: 132
38 |     b: true
39 |     c:
40 |       - alpha
41 |       - beta
42 |     d:
43 |       _a: 1
44 |       _b: test
45 |   expected:
46 |     a: 132
47 |     b: true
48 |     c:
49 |       - alpha
50 |       - beta
51 |     d:
52 |       _a: 1
53 |       _b: test
54 |
55 | python_str:
56 |   instance: !!python/str "test"
57 |   expected: test
58 |
59 | python_int:
60 |   instance: !!python/int "32"
61 |   expected: 32
62 |
63 | python_float:
64 |   instance: !!python/float "3.123"
65 |   expected: 3.123
66 |
--------------------------------------------------------------------------------
/test/data/test_data_dict.json:
--------------------------------------------------------------------------------
1 | {
2 | "unsorted_list": [ 10, 12, 15 ],
3 | "sorted_list": [ 15, 12, 50 ]
4 | }
5 |
--------------------------------------------------------------------------------
/test/data/test_data_dict.yaml:
--------------------------------------------------------------------------------
1 | unsorted_list:
2 | - 10
3 | - 15
4 | - 12
5 |
6 | sorted_list: [ 15, 12, 50 ]
7 |
--------------------------------------------------------------------------------
/test/data/test_data_dict_dict.json:
--------------------------------------------------------------------------------
1 | {
2 | "positive_integer_range": {
3 | "start": 0,
4 | "end": 2,
5 | "value": 1
6 | },
7 | "negative_integer_range": {
8 | "start": -2,
9 | "end": 0,
10 | "value": -1
11 | },
12 | "positive_real_range": {
13 | "start": 0.0,
14 | "end": 1.0,
15 | "value": 0.5
16 | },
17 | "negative_real_range": {
18 | "start": -1.0,
19 | "end": 0.0,
20 | "value": -0.5
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/test/data/test_data_dict_dict.yaml:
--------------------------------------------------------------------------------
1 | positive_integer_range:
2 |   start: 0
3 |   end: 2
4 |   value: 1
5 |
6 | negative_integer_range:
7 |   start: -2
8 |   end: 0
9 |   value: -1
10 |
11 | positive_real_range:
12 |   start: 0.0
13 |   end: 1.0
14 |   value: 0.5
15 |
16 | negative_real_range:
17 |   start: -1.0
18 |   end: 0.0
19 |   value: -0.5
20 |
--------------------------------------------------------------------------------
/test/data/test_data_list.json:
--------------------------------------------------------------------------------
1 | [
2 | "Hello",
3 | "Goodbye"
4 | ]
5 |
6 |
--------------------------------------------------------------------------------
/test/data/test_data_list.yaml:
--------------------------------------------------------------------------------
1 | - "Hello"
2 | - "Goodbye"
3 |
--------------------------------------------------------------------------------
/test/data/test_functional_custom_tags.yaml:
--------------------------------------------------------------------------------
1 | custom_class:
2 |   instance: !!python/object:test.test_functional.CustomClass {}
3 |   expected: CustomClass
4 |
--------------------------------------------------------------------------------
/test/mycode.py:
--------------------------------------------------------------------------------
1 | """
2 | Some simple functions that we will use in our tests.
3 | """
4 |
5 |
6 | def larger_than_two(value):
7 |     return value > 2
8 |
9 |
10 | def has_three_elements(value):
11 |     return len(value) == 3
12 |
13 |
14 | def is_a_greeting(value):
15 |     return value in ['Hello', 'Goodbye']
16 |
--------------------------------------------------------------------------------
/test/test_async.py:
--------------------------------------------------------------------------------
1 | import aiounittest
2 |
3 | from ddt import ddt, data
4 | from test.mycode import larger_than_two
5 |
6 |
7 | @ddt
8 | class TestAsync(aiounittest.AsyncTestCase):
9 |     @data(3, 4, 12, 23)
10 |     async def test_larger_than_two(self, value):
11 |         self.assertTrue(larger_than_two(value))
12 |
--------------------------------------------------------------------------------
/test/test_example.py:
--------------------------------------------------------------------------------
1 | import itertools
2 | import unittest
3 |
4 | from ddt import ddt, data, file_data, idata, unpack
5 | from test.mycode import larger_than_two, has_three_elements, is_a_greeting
6 |
7 | try:
8 | import yaml
9 | except ImportError: # pragma: no cover
10 | have_yaml_support = False
11 | else:
12 | have_yaml_support = True
13 |
14 | # A good-looking decorator
15 | needs_yaml = unittest.skipUnless(
16 | have_yaml_support, "Need YAML to run this test"
17 | )
18 |
19 |
20 | class Mylist(list):
21 | pass
22 |
23 |
24 | class MyClass:
25 | def __init__(self, **kwargs):
26 | for field, value in kwargs.items():
27 | setattr(self, field, value)
28 |
29 | def __eq__(self, other):
30 | return isinstance(other, dict) and vars(self) == other or \
31 | isinstance(other, MyClass) and vars(self) == vars(other)
32 |
33 | def __str__(self):
34 | return "TestObject %s" % vars(self)
35 |
36 |
37 | def annotated(a, b):
38 | r = Mylist([a, b])
39 | setattr(r, "__name__", "test_%d_greater_than_%d" % (a, b))
40 | return r
41 |
42 |
43 | def annotated2(listIn, name, docstring):
44 | r = Mylist(listIn)
45 | setattr(r, "__name__", name)
46 | setattr(r, "__doc__", docstring)
47 | return r
48 |
49 |
50 | @ddt
51 | class FooTestCase(unittest.TestCase):
52 | def test_undecorated(self):
53 | self.assertTrue(larger_than_two(24))
54 |
55 | @data(3, 4, 12, 23)
56 | def test_larger_than_two(self, value):
57 | self.assertTrue(larger_than_two(value))
58 |
59 | @data(1, -3, 2, 0)
60 | def test_not_larger_than_two(self, value):
61 | self.assertFalse(larger_than_two(value))
62 |
63 | @data(annotated(2, 1), annotated(10, 5))
64 | def test_greater(self, value):
65 | a, b = value
66 | self.assertGreater(a, b)
67 |
68 | @idata(itertools.product([0, 1, 2], [3, 4, 5]))
69 | def test_iterable_argument(self, value):
70 | first_value, second_value = value
71 | self.assertLessEqual(first_value, 2)
72 | self.assertGreaterEqual(second_value, 3)
73 |
74 | @data(annotated2([2, 1], 'Test_case_1', """Test docstring 1"""),
75 | annotated2([10, 5], 'Test_case_2', """Test docstring 2"""))
76 | def test_greater_with_name_docstring(self, value):
77 | a, b = value
78 | self.assertGreater(a, b)
79 | self.assertIsNotNone(getattr(value, "__name__"))
80 | self.assertIsNotNone(getattr(value, "__doc__"))
81 |
82 | @file_data('data/test_data_dict_dict.json')
83 | def test_file_data_json_dict_dict(self, start, end, value):
84 | self.assertLess(start, end)
85 | self.assertLess(value, end)
86 | self.assertGreater(value, start)
87 |
88 | @file_data('data/test_data_dict.json')
89 | def test_file_data_json_dict(self, value):
90 | self.assertTrue(has_three_elements(value))
91 |
92 | @file_data('data/test_data_list.json')
93 | def test_file_data_json_list(self, value):
94 | self.assertTrue(is_a_greeting(value))
95 |
96 | @needs_yaml
97 | @file_data('data/test_data_dict_dict.yaml')
98 | def test_file_data_yaml_dict_dict(self, start, end, value):
99 | self.assertLess(start, end)
100 | self.assertLess(value, end)
101 | self.assertGreater(value, start)
102 |
103 | @needs_yaml
104 | @file_data('data/test_data_dict.yaml')
105 | def test_file_data_yaml_dict(self, value):
106 | self.assertTrue(has_three_elements(value))
107 |
108 | @needs_yaml
109 | @file_data('data/test_data_list.yaml')
110 | def test_file_data_yaml_list(self, value):
111 | self.assertTrue(is_a_greeting(value))
112 |
113 | @data((3, 2), (4, 3), (5, 3))
114 | @unpack
115 | def test_tuples_extracted_into_arguments(self, first_value, second_value):
116 | self.assertTrue(first_value > second_value)
117 |
118 | @data([3, 2], [4, 3], [5, 3])
119 | @unpack
120 | def test_list_extracted_into_arguments(self, first_value, second_value):
121 | self.assertTrue(first_value > second_value)
122 |
123 | @unpack
124 | @data({'first': 1, 'second': 3, 'third': 2},
125 | {'first': 4, 'second': 6, 'third': 5})
126 | def test_dicts_extracted_into_kwargs(self, first, second, third):
127 | self.assertTrue(first < third < second)
128 |
129 | @data(u'ascii', u'non-ascii-\N{SNOWMAN}')
130 | def test_unicode(self, value):
131 | self.assertIn(value, (u'ascii', u'non-ascii-\N{SNOWMAN}'))
132 |
133 | @data(3, 4, 12, 23)
134 | def test_larger_than_two_with_doc(self, value):
135 | """Larger than two with value {0}"""
136 | self.assertTrue(larger_than_two(value))
137 |
138 | @data(3, 4, 12, 23)
139 | def test_doc_missing_args(self, value):
140 | """Missing args with value {0} and {1}"""
141 | self.assertTrue(larger_than_two(value))
142 |
143 | @data(3, 4, 12, 23)
144 | def test_doc_missing_kargs(self, value):
145 | """Missing kargs with value {value} {value2}"""
146 | self.assertTrue(larger_than_two(value))
147 |
148 | @data([3, 2], [4, 3], [5, 3])
149 | @unpack
150 | def test_list_extracted_with_doc(self, first_value, second_value):
151 | """Extract into args with first value {} and second value {}"""
152 | self.assertTrue(first_value > second_value)
153 |
154 |
155 | if have_yaml_support:
156 | # This test will only succeed if the execution context is from the ddt
157 | # directory. pyyaml cannot locate test.test_example.MyClass otherwise!
158 |
159 | @ddt
160 | class YamlOnlyTestCase(unittest.TestCase):
161 | @file_data('data/test_custom_yaml_loader.yaml', yaml.UnsafeLoader)
162 | def test_custom_yaml_loader(self, instance, expected):
163 | """Test with yaml tags to create specific classes to compare"""
164 | self.assertEqual(expected, instance)
165 |
--------------------------------------------------------------------------------
/test/test_functional.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | from sys import modules
4 | import pytest
5 | import six
6 |
7 | try:
8 | from unittest import mock
9 | except ImportError:
10 | import mock
11 |
12 | from ddt import ddt, data, file_data, idata, TestNameFormat
13 |
14 | from test.mycode import has_three_elements
15 |
16 |
17 | class CustomClass:
18 | pass
19 |
20 |
21 | @ddt
22 | class Dummy(object):
23 | """
24 | Dummy class to test the data decorator on
25 | """
26 |
27 | @data(1, 2, 3, 4)
28 | def test_something(self, value):
29 | return value
30 |
31 |
32 | @ddt(testNameFormat=TestNameFormat.DEFAULT)
33 | class DummyTestNameFormatDefault(object):
34 | """
35 | Dummy class to test the ddt decorator that generates test names using the
36 | default format (index and values).
37 | """
38 |
39 | @data("a", "b", "c", "d")
40 | def test_something(self, value):
41 | return value
42 |
43 |
44 | @ddt(testNameFormat=TestNameFormat.INDEX_ONLY)
45 | class DummyTestNameFormatIndexOnly(object):
46 | """
47 | Dummy class to test the ddt decorator that generates test names using only
48 | the index.
49 | """
50 |
51 | @data("a", "b", "c", "d")
52 | def test_something(self, value):
53 | return value
54 |
55 |
56 | @ddt
57 | class DummyInvalidIdentifier():
58 | """
59 |     Dummy class to test the data decorator receiving values with characters
60 |     that are not valid in Python identifiers
61 | """
62 |
63 | @data('32v2 g #Gmw845h$W b53wi.')
64 | def test_data_with_invalid_identifier(self, value):
65 | return value
66 |
67 |
68 | @ddt
69 | class FileDataDummy(object):
70 | """
71 | Dummy class to test the file_data decorator on
72 | """
73 |
74 | @file_data("data/test_data_dict.json")
75 | def test_something_again(self, value):
76 | return value
77 |
78 |
79 | @ddt
80 | class JSONFileDataMissingDummy(object):
81 | """
82 |     Dummy class to test the file_data decorator when the
83 | JSON file is missing
84 | """
85 |
86 | @file_data("data/test_data_dict_missing.json")
87 | def test_something_again(self, value):
88 | return value
89 |
90 |
91 | @ddt
92 | class YAMLFileDataMissingDummy(object):
93 | """
94 |     Dummy class to test the file_data decorator when the
95 | YAML file is missing
96 | """
97 |
98 | @file_data("data/test_data_dict_missing.yaml")
99 | def test_something_again(self, value):
100 | return value
101 |
102 |
103 | def test_data_decorator():
104 | """
105 | Test the ``data`` method decorator
106 | """
107 |
108 | def hello():
109 | pass
110 |
111 | pre_size = len(hello.__dict__)
112 | keys = set(hello.__dict__.keys())
113 | data_hello = data(1, 2)(hello)
114 | dh_keys = set(data_hello.__dict__.keys())
115 | post_size = len(data_hello.__dict__)
116 |
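     |     # data() is expected to attach two attributes to the function: an
     |     # index-length hint and the tuple of values (their names are internal
     |     # to ddt, hence the comparison via the sorted attribute difference).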
117 | assert post_size == pre_size + 2
118 | extra_attrs = list(dh_keys - keys)
119 | extra_attrs.sort()
120 | assert len(extra_attrs) == 2
121 | assert getattr(data_hello, extra_attrs[0]) == 1
122 | assert getattr(data_hello, extra_attrs[1]) == (1, 2)
123 |
124 |
125 | def test_file_data_decorator_with_dict():
126 | """
127 | Test the ``file_data`` method decorator
128 | """
129 |
130 | def hello():
131 | pass
132 |
133 | pre_size = len(hello.__dict__)
134 | keys = set(hello.__dict__.keys())
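     |     # Note: despite the test name, this applies data() to a file name
     |     # string; the attributes it sets mirror those checked in
     |     # test_data_decorator above.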
135 | data_hello = data("test_data_dict.json")(hello)
136 |
137 | dh_keys = set(data_hello.__dict__.keys())
138 | post_size = len(data_hello.__dict__)
139 | assert post_size == pre_size + 2
140 |
141 | extra_attrs = list(dh_keys - keys)
142 | extra_attrs.sort()
143 | assert len(extra_attrs) == 2
144 | assert getattr(data_hello, extra_attrs[0]) == 1
145 | assert getattr(data_hello, extra_attrs[1]) == ("test_data_dict.json",)
146 |
147 |
148 | def _is_test(x):
149 | return x.startswith('test_')
150 |
151 |
152 | def test_ddt():
153 | """
154 | Test the ``ddt`` class decorator
155 | """
156 | tests = len(list(filter(_is_test, Dummy.__dict__)))
157 | assert tests == 4
158 |
159 |
160 | def test_ddt_format_test_name_index_only():
161 | """
162 | Test the ``ddt`` class decorator with ``INDEX_ONLY`` test name format
163 | """
164 | tests = set(filter(_is_test, DummyTestNameFormatIndexOnly.__dict__))
165 | assert len(tests) == 4
166 |
167 | indexes = range(1, 5)
168 | dataSets = ["a", "b", "c", "d"] # @data from DummyTestNameFormatIndexOnly
169 | for i, d in zip(indexes, dataSets):
170 | assert ("test_something_{}".format(i) in tests)
171 | assert not ("test_something_{}_{}".format(i, d) in tests)
172 |
173 |
174 | def test_ddt_format_test_name_default():
175 | """
176 | Test the ``ddt`` class decorator with ``DEFAULT`` test name format
177 | """
178 | tests = set(filter(_is_test, DummyTestNameFormatDefault.__dict__))
179 | assert len(tests) == 4
180 |
181 | indexes = range(1, 5)
182 | dataSets = ["a", "b", "c", "d"] # @data from DummyTestNameFormatDefault
183 | for i, d in zip(indexes, dataSets):
184 | assert not ("test_something_{}".format(i) in tests)
185 | assert ("test_something_{}_{}".format(i, d) in tests)
186 |
187 |
188 | def test_idata_single_argument():
189 | """Test that the single-argument form of ``idata`` works."""
190 | payload = [5, 12, 13]
191 |
192 | @ddt
193 | class Dummy(object):
194 | """Dummy class to test that the ``idata(iterable)`` decorator works."""
195 | @idata(payload)
196 | def test_something(self, value):
197 | return value
198 |
199 | tests = list(filter(_is_test, Dummy.__dict__))
200 | assert len(tests) == len(payload)
201 |
202 | expected_tests = [
203 | "test_something_{:1d}_{}".format(i + 1, v) for i, v in enumerate(payload)
204 | ]
205 | assert sorted(tests) == sorted(expected_tests)
206 |
207 |
208 | def test_idata_automatic_zero_padding():
209 | """
210 |     Test that the single-argument form of ``idata`` zero-pads the test
211 |     indices so the generated names all have the same length
212 | """
213 | payload = range(15)
214 |
215 | @ddt
216 | class Dummy(object):
217 | """Dummy class to test that the ``idata(iterable)`` decorator works."""
218 | @idata(payload)
219 | def test_something(self, value):
220 | return value
221 |
222 | tests = list(filter(_is_test, Dummy.__dict__))
223 | assert len(tests) == len(payload)
224 |
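     |     # 15 data items give two-digit indices, so the generated names are
     |     # zero-padded to 01..15.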
225 | expected_tests = [
226 | "test_something_{:02d}_{}".format(i + 1, v) for i, v in enumerate(payload)
227 | ]
228 | assert sorted(tests) == sorted(expected_tests)
229 |
230 |
231 | def test_idata_override_index_len():
232 | """
233 |     Test that overriding ``index_len`` in ``idata`` adds extra zero-padding
234 |     to the generated test names.
235 | """
236 | payload = [4, 2, 1]
237 |
238 | @ddt
239 | class Dummy(object):
240 | @idata(payload, index_len=2)
241 | def test_something(self, value):
242 | return value
243 |
244 | tests = list(filter(_is_test, Dummy.__dict__))
245 | assert len(tests) == len(payload)
246 |
247 | expected_tests = [
248 | "test_something_{:02d}_{}".format(i + 1, v) for i, v in enumerate(payload)
249 | ]
250 | assert sorted(tests) == sorted(expected_tests)
251 |
252 |
253 | def test_idata_consumable_iterator():
254 | """
255 | Test that using ``idata`` with a consumable iterator still generates the
256 | expected tests.
257 | """
258 | payload = [51, 78, 2]
259 |
260 | def consumable_iterator():
261 | # Not using `yield from` for Python 2.7.
262 | for i in payload:
263 | yield i
264 |
265 | @ddt
266 | class Dummy(object):
267 | @idata(consumable_iterator())
268 | def test_something(self, value):
269 | return value
270 |
271 | tests = list(filter(_is_test, Dummy.__dict__))
272 |
273 | expected_tests = [
274 | "test_something_{:1d}_{}".format(i + 1, v) for i, v in enumerate(payload)
275 | ]
276 | assert sorted(tests) == sorted(expected_tests)
277 |
278 |
279 | def test_file_data_test_creation():
280 | """
281 | Test that the ``file_data`` decorator creates two tests
282 | """
283 |
284 | tests = len(list(filter(_is_test, FileDataDummy.__dict__)))
285 | assert tests == 2
286 |
287 |
288 | def test_file_data_test_names_dict():
289 | """
290 | Test that ``file_data`` creates tests with the correct name
291 |
292 | Name is the the function name plus the key in the JSON data,
293 |     The name is the function name plus the key in the JSON data,
294 | """
295 |
296 | tests = set(filter(_is_test, FileDataDummy.__dict__))
297 |
298 | tests_dir = os.path.dirname(__file__)
299 | test_data_path = os.path.join(tests_dir, 'data/test_data_dict.json')
300 | test_data = json.loads(open(test_data_path).read())
301 | index_len = len(str(len(test_data)))
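     |     # Expected names have the form
     |     # '<function>_<zero-padded index>_<dict key>'.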
302 | created_tests = set([
303 | "test_something_again_{0:0{2}}_{1}".format(index + 1, name, index_len)
304 | for index, name in enumerate(test_data.keys())
305 | ])
306 |
307 | assert tests == created_tests
308 |
309 |
310 | def test_feed_data_data():
311 | """
312 | Test that data is fed to the decorated tests
313 | """
314 | tests = filter(_is_test, Dummy.__dict__)
315 |
316 | values = []
317 | obj = Dummy()
318 | for test in tests:
319 | method = getattr(obj, test)
320 | values.append(method())
321 |
322 | assert set(values) == set([1, 2, 3, 4])
323 |
324 |
325 | def test_feed_data_file_data():
326 | """
327 | Test that data is fed to the decorated tests from a file
328 | """
329 | tests = filter(_is_test, FileDataDummy.__dict__)
330 |
331 | values = []
332 | obj = FileDataDummy()
333 | for test in tests:
334 | method = getattr(obj, test)
335 | values.extend(method())
336 |
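     |     # The JSON file holds two entries of three integers each; the
     |     # duplicates collapse in the set comparison below.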
337 | assert set(values) == set([10, 12, 15, 15, 12, 50])
338 |
339 |
340 | def test_feed_data_file_data_missing_json():
341 | """
342 | Test that a ValueError is raised when JSON file is missing
343 | """
344 | tests = filter(_is_test, JSONFileDataMissingDummy.__dict__)
345 |
346 | obj = JSONFileDataMissingDummy()
347 | for test in tests:
348 | method = getattr(obj, test)
349 | with pytest.raises(ValueError):
350 | method()
351 |
352 |
353 | def test_feed_data_file_data_missing_yaml():
354 | """
355 | Test that a ValueError is raised when YAML file is missing
356 | """
357 | tests = filter(_is_test, YAMLFileDataMissingDummy.__dict__)
358 |
359 | obj = YAMLFileDataMissingDummy()
360 | for test in tests:
361 | method = getattr(obj, test)
362 | with pytest.raises(ValueError):
363 | method()
364 |
365 |
366 | def test_ddt_data_name_attribute():
367 | """
368 | Test the ``__name__`` attribute handling of ``data`` items with ``ddt``
369 | """
370 |
371 | def hello():
372 | pass
373 |
374 | class Myint(int):
375 | pass
376 |
377 | class Mytest(object):
378 | pass
379 |
380 | d1 = Myint(1)
381 | d1.__name__ = 'data1'
382 |
383 | d2 = Myint(2)
384 |
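     |     # d1 carries an explicit __name__, so it appears in the generated test
     |     # name; d2 has none and is expected to fall back to its value.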
385 | data_hello = data(d1, d2)(hello)
386 | setattr(Mytest, 'test_hello', data_hello)
387 |
388 | ddt_mytest = ddt(Mytest)
389 | assert getattr(ddt_mytest, 'test_hello_1_data1')
390 | assert getattr(ddt_mytest, 'test_hello_2_2')
391 |
392 |
393 | def test_ddt_data_doc_attribute():
394 | """
395 | Test the ``__doc__`` attribute handling of ``data`` items with ``ddt``
396 | """
397 |
398 | def func_w_doc():
399 | """testFunctionDocstring {6}
400 |
401 | :param: None
402 | :return: None
403 | """
404 | pass
405 |
406 | def func_wo_doc():
407 | pass
408 |
409 | class Myint(int):
410 | pass
411 |
412 | class Mytest(object):
413 | pass
414 |
415 | d1 = Myint(1)
416 | d1.__name__ = 'case1'
417 | d1.__doc__ = """docstring1"""
418 |
419 | d2 = Myint(2)
420 | d2.__name__ = 'case2'
421 |
422 | data_hello = data(d1, d2, {'test': True})(func_w_doc)
423 | data_hello2 = data(d1, d2, {'test': True})(func_wo_doc)
424 |
425 | setattr(Mytest, 'first_test', data_hello)
426 | setattr(Mytest, 'second_test', data_hello2)
427 | ddt_mytest = ddt(Mytest)
428 |
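     |     # Expected docstring resolution: a data value's own __doc__ wins;
     |     # otherwise the test function's docstring is used, or None if the
     |     # function has no docstring either.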
429 | assert getattr(
430 | getattr(ddt_mytest, 'first_test_1_case1'), '__doc__'
431 | ) == d1.__doc__
432 | assert getattr(
433 | getattr(ddt_mytest, 'first_test_2_case2'), '__doc__'
434 | ) == func_w_doc.__doc__
435 | assert getattr(
436 | getattr(ddt_mytest, 'first_test_3'), '__doc__'
437 | ) == func_w_doc.__doc__
438 | assert getattr(
439 | getattr(ddt_mytest, 'second_test_1_case1'), '__doc__'
440 | ) == d1.__doc__
441 | assert getattr(
442 | getattr(ddt_mytest, 'second_test_2_case2'), '__doc__'
443 | ) is None
444 | assert getattr(getattr(ddt_mytest, 'second_test_3'), '__doc__') is None
445 |
446 |
447 | def test_ddt_data_unicode():
448 | """
449 | Test that unicode strings are converted to function names correctly
450 | """
451 | # We test unicode support separately for python 2 and 3
452 |
453 | if six.PY2:
454 |
455 | @ddt
456 | class Mytest(object):
457 | @data(u'ascii', u'non-ascii-\N{SNOWMAN}', {u'\N{SNOWMAN}': 'data'})
458 | def test_hello(self, val):
459 | pass
460 |
461 | assert getattr(Mytest, 'test_hello_1_ascii') is not None
462 | assert getattr(Mytest, 'test_hello_2_non_ascii__u2603') is not None
463 | assert getattr(Mytest, 'test_hello_3') is not None
464 |
465 | elif six.PY3:
466 |
467 | @ddt
468 | class Mytest(object):
469 | @data('ascii', 'non-ascii-\N{SNOWMAN}', {'\N{SNOWMAN}': 'data'})
470 | def test_hello(self, val):
471 | pass
472 |
473 | assert getattr(Mytest, 'test_hello_1_ascii') is not None
474 | assert getattr(Mytest, 'test_hello_2_non_ascii__') is not None
475 | assert getattr(Mytest, 'test_hello_3') is not None
476 |
477 |
478 | def test_ddt_data_object():
479 | """
480 |     Test that the data value is left out of the generated test name for non-trivial objects
481 | """
482 |
483 | @ddt
484 | class Mytest(object):
485 | @data(object())
486 | def test_object(self, val):
487 | pass
488 | assert getattr(Mytest, 'test_object_1') is not None
489 |
490 |
491 | def test_feed_data_with_invalid_identifier():
492 | """
493 |     Test that data with invalid identifier characters is fed to the test and the generated name is sanitized
494 | """
495 | tests = list(filter(_is_test, DummyInvalidIdentifier.__dict__))
496 | assert len(tests) == 1
497 |
498 | obj = DummyInvalidIdentifier()
499 | method = getattr(obj, tests[0])
500 | assert (
501 | method.__name__ ==
502 | 'test_data_with_invalid_identifier_1_32v2_g__Gmw845h_W_b53wi_'
503 | )
504 | assert method() == '32v2 g #Gmw845h$W b53wi.'
505 |
506 |
507 | @mock.patch('ddt._have_yaml', False)
508 | def test_load_yaml_without_yaml_support():
509 | """
510 | Test that YAML files are not loaded if YAML is not installed.
511 | """
512 |
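     |     # With ddt._have_yaml patched to False, the .yaml file cannot be
     |     # parsed, so calling the generated test is expected to raise ValueError.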
513 | @ddt
514 | class NoYAMLInstalledTest(object):
515 |
516 | @file_data('data/test_data_dict.yaml')
517 | def test_file_data_yaml_dict(self, value):
518 | assert has_three_elements(value)
519 |
520 | tests = filter(_is_test, NoYAMLInstalledTest.__dict__)
521 |
522 | obj = NoYAMLInstalledTest()
523 | for test in tests:
524 | method = getattr(obj, test)
525 | with pytest.raises(ValueError):
526 | method()
527 |
528 |
529 | def test_load_yaml_with_python_tag():
530 | """
531 |     Test that YAML files containing python tags raise no exception when a
532 |     loader that allows python tags is passed.
533 | """
534 |
535 | from yaml import UnsafeLoader
536 | from yaml.constructor import ConstructorError
537 |
538 | def str_to_type(class_name):
539 | return getattr(modules[__name__], class_name)
540 |
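     |     # The default YAML loader is expected to reject python tags with a
     |     # ConstructorError while the class body is decorated; any other
     |     # exception is treated as a failure.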
541 | try:
542 | @ddt
543 | class YamlDefaultLoaderTest(object):
544 | @file_data('data/test_functional_custom_tags.yaml')
545 | def test_cls_is_instance(self, cls, expected):
546 | assert isinstance(cls, str_to_type(expected))
547 | except Exception as e:
548 | if not isinstance(e, ConstructorError):
549 | raise AssertionError()
550 |
551 | @ddt
552 | class YamlUnsafeLoaderTest(object):
553 | @file_data('data/test_functional_custom_tags.yaml', UnsafeLoader)
554 | def test_cls_is_instance(self, instance, expected):
555 | assert isinstance(instance, str_to_type(expected))
556 |
557 | tests = list(filter(_is_test, YamlUnsafeLoaderTest.__dict__))
558 | obj = YamlUnsafeLoaderTest()
559 |
560 | if not tests:
561 | raise AssertionError('No tests have been found.')
562 |
563 | for test in tests:
564 | method = getattr(obj, test)
565 | method()
566 |
--------------------------------------------------------------------------------
/test/test_named_data.py:
--------------------------------------------------------------------------------
1 | import ddt
2 | import unittest
3 |
4 |
5 | @ddt.ddt
6 | class TestNamedData(unittest.TestCase):
7 | class NonTrivialClass(object):
8 | pass
9 |
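     |     # With named_data, the first element of each sequence (or the 'name'
     |     # entry of each dict) becomes the test-name suffix and is not passed to
     |     # the test method.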
10 | @ddt.named_data(
11 | ['Single', 0, 1]
12 | )
13 | def test_single_named_value(self, value1, value2):
14 | self.assertGreater(value2, value1)
15 |
16 | @ddt.named_data(
17 | ['1st', 1, 2],
18 | ('2nd', 3, 4)
19 | )
20 | def test_multiple_named_value_seqs(self, value1, value2):
21 | self.assertGreater(value2, value1)
22 |
23 | @ddt.named_data(
24 | dict(name='1st', value2=1, value1=0),
25 | {'name': '2nd', 'value2': 1, 'value1': 0}
26 | )
27 | def test_multiple_named_value_dicts(self, value1, value2):
28 | self.assertGreater(value2, value1)
29 |
30 | @ddt.named_data(
31 | dict(name='1st', value2=1, value1=0),
32 | ('2nd', 0, 1)
33 | )
34 | def test_multiple_named_value_mixed(self, value1, value2):
35 | self.assertGreater(value2, value1)
36 |
37 | @ddt.named_data(
38 | ['Passes', NonTrivialClass(), True],
39 | ('Fails', 1, False)
40 | )
41 | def test_seq_with_nontrivial_type(self, value, passes):
42 | if passes:
43 | self.assertIsInstance(value, self.NonTrivialClass)
44 | else:
45 | self.assertNotIsInstance(value, self.NonTrivialClass)
46 |
47 | @ddt.named_data(
48 | {'name': 'Passes', 'value': NonTrivialClass(), 'passes': True},
49 | {'name': 'Fails', 'value': 1, 'passes': False}
50 | )
51 | def test_dict_with_nontrivial_type(self, value, passes):
52 | if passes:
53 | self.assertIsInstance(value, self.NonTrivialClass)
54 | else:
55 | self.assertNotIsInstance(value, self.NonTrivialClass)
56 |
57 | def test_missing_name_dict(self):
58 | with self.assertRaises(KeyError):
59 | @ddt.named_data(
60 | {'not_a_name': 'oops', 'value': 1}
61 | )
62 | def _internal_test(value):
63 | pass
64 |
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | [tox]
2 | envlist = py36,py37,py38,py39,py310
3 |
4 | [testenv]
5 | deps =
6 | pytest
7 | pytest-cov
8 | coverage
9 | aiounittest
10 | flake8
11 | six>=1.4.0
12 | PyYAML
13 | commands =
14 | pytest --cov=ddt --cov-report html
15 | flake8 ddt.py test
16 |
17 | [flake8]
18 | max-line-length = 127
19 | exclude = .git,__pycache__,docs/source/conf.py,old,build,dist,.tox,.venv
20 | max-complexity = 10
21 |
22 | [gh-actions]
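     | # Used by the tox-gh-actions plugin to map the GitHub Actions
     | # python-version matrix onto tox environments.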
23 | python =
25 | 3.6: py36
26 | 3.7: py37
27 | 3.8: py38
28 | 3.9: py39
29 | 3.10: py310
30 |
--------------------------------------------------------------------------------