├── testfixtures ├── py.typed ├── version.txt ├── tests │ ├── test_django │ │ ├── __init__.py │ │ ├── models.py │ │ ├── settings.py │ │ ├── manage.py │ │ ├── test_shouldraise.py │ │ └── test_compare.py │ ├── sample3.py │ ├── __init__.py │ ├── sample2.py │ ├── test_generator.py │ ├── directory-contents.txt │ ├── configparser-write.txt │ ├── configparser-read.txt │ ├── test_diff.py │ ├── test_stringcomparison.py │ ├── sample1.py │ ├── test_mock.py │ ├── test_tempdir.py │ ├── test_replacer.py │ ├── test_compare_typed.py │ ├── test_sybil.py │ ├── test_roundcomparison.py │ ├── test_outputcapture.py │ ├── test_rangecomparison.py │ ├── test_twisted.py │ ├── test_shouldwarn.py │ ├── test_wrap.py │ ├── test_log_capture.py │ ├── test_popen_docs.py │ ├── test_time.py │ ├── test_mappingcomparison.py │ └── test_date.py ├── compat.py ├── mock.py ├── __init__.py ├── resolve.py ├── sybil.py ├── rmtree.py ├── shouldwarn.py ├── django.py ├── utils.py ├── shouldraise.py ├── twisted.py └── outputcapture.py ├── CLAUDE.md ├── docs ├── changes.txt ├── license.txt ├── twisted.txt ├── index.txt ├── installation.txt ├── development.txt ├── conf.py ├── django.txt ├── Makefile ├── make.bat ├── streams.txt ├── warnings.txt ├── utilities.txt ├── api.txt ├── exceptions.txt └── popen.txt ├── pytest.ini ├── .carthorse.yml ├── .gitignore ├── .coveragerc ├── .readthedocs.yml ├── conftest.py ├── LICENSE.txt ├── mypy.ini ├── setup.py ├── README.rst ├── .circleci └── config.yml └── AGENTS.md /testfixtures/py.typed: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /testfixtures/version.txt: -------------------------------------------------------------------------------- 1 | 10.0.0 2 | -------------------------------------------------------------------------------- /testfixtures/tests/test_django/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /CLAUDE.md: -------------------------------------------------------------------------------- 1 | Read @AGENTS.md for instructions. 2 | -------------------------------------------------------------------------------- /testfixtures/tests/sample3.py: -------------------------------------------------------------------------------- 1 | from .sample1 import z 2 | 3 | SOME_CONSTANT = 42 4 | -------------------------------------------------------------------------------- /docs/changes.txt: -------------------------------------------------------------------------------- 1 | 2 | .. currentmodule:: testfixtures 3 | 4 | .. include:: ../CHANGELOG.rst 5 | -------------------------------------------------------------------------------- /docs/license.txt: -------------------------------------------------------------------------------- 1 | ======= 2 | License 3 | ======= 4 | 5 | .. 
literalinclude:: ../LICENSE.txt 6 | -------------------------------------------------------------------------------- /testfixtures/tests/__init__.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | warnings.simplefilter('default', ImportWarning) 3 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | addopts = -p no:doctest 3 | norecursedirs=_build 4 | DJANGO_SETTINGS_MODULE=testfixtures.tests.test_django.settings 5 | filterwarnings = 6 | ignore::DeprecationWarning 7 | ignore::PendingDeprecationWarning 8 | error::SyntaxWarning 9 | -------------------------------------------------------------------------------- /.carthorse.yml: -------------------------------------------------------------------------------- 1 | carthorse: 2 | version-from: setup.py 3 | tag-format: "{version}" 4 | when: 5 | - version-not-tagged 6 | actions: 7 | - run: "pip install -e .[build]" 8 | - run: "twine upload -u __token__ -p $PYPI_TOKEN dist/*" 9 | - create-tag 10 | -------------------------------------------------------------------------------- /testfixtures/compat.py: -------------------------------------------------------------------------------- 1 | # compatibility module for different python versions 2 | import sys 3 | from typing import Tuple 4 | 5 | PY_VERSION: Tuple[int, int] = sys.version_info[:2] 6 | 7 | PY_312_PLUS: bool = PY_VERSION >= (3, 12) 8 | PY_313_PLUS: bool = PY_VERSION >= (3, 13) 9 | -------------------------------------------------------------------------------- /testfixtures/tests/test_django/models.py: -------------------------------------------------------------------------------- 1 | from django.db import models 2 | 3 | 4 | class OtherModel(models.Model): 5 | pass 6 | 7 | 8 | class SampleModel(models.Model): 9 | value = models.IntegerField() 10 | not_editable = models.IntegerField(editable=False) 11 | created = models.DateTimeField(auto_now_add=True) 12 | -------------------------------------------------------------------------------- /testfixtures/tests/test_django/settings.py: -------------------------------------------------------------------------------- 1 | SECRET_KEY = 'fake-key' 2 | INSTALLED_APPS = [ 3 | 'django.contrib.auth', 4 | 'django.contrib.contenttypes', 5 | "testfixtures.tests.test_django", 6 | ] 7 | 8 | DATABASES = {'default': {'ENGINE': 'django.db.backends.sqlite3'}} 9 | 10 | DEFAULT_AUTO_FIELD='django.db.models.AutoField' 11 | -------------------------------------------------------------------------------- /testfixtures/tests/test_django/manage.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | def main(): 4 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testfixtures.tests.test_django.settings") 5 | from django.core.management import execute_from_command_line 6 | execute_from_command_line() 7 | 8 | if __name__ == "__main__": # pragma: no cover 9 | main() 10 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /.installed.cfg 2 | /bin/ 3 | /develop-eggs 4 | /dist 5 | /docs/_build 6 | /eggs 7 | /*.egg-info/ 8 | /parts/ 9 | *.pyc 10 | /.coverage 11 | /*.xml 12 | /.tox 13 | /htmlcov 14 | /include 15 | /lib 16 | /local 17 | /man 18 | /.Python 19 | desc.html 20 | pip-selfcheck.json 21 | .coverage.* 
22 | .cache 23 | .pytest* 24 | /build 25 | .venv 26 | /.claude 27 | -------------------------------------------------------------------------------- /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | source = testfixtures 3 | 4 | [report] 5 | exclude_lines = 6 | # the original exclude 7 | pragma: no cover 8 | 9 | # code executed only when tests fail 10 | 'No exception raised!' 11 | self\.fail\('Expected 12 | 13 | # example code that we don't want to cover with pragma statements 14 | guppy = 15 | 16 | if TYPE_CHECKING: 17 | 18 | \.\.\. 19 | -------------------------------------------------------------------------------- /.readthedocs.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | build: 4 | os: ubuntu-22.04 5 | tools: 6 | # https://github.com/sphinx-doc/sphinx/issues/13178 - waiting for Sphinx 8.2 7 | python: "3.12" 8 | 9 | python: 10 | install: 11 | - method: pip 12 | path: . 13 | extra_requirements: 14 | - docs 15 | sphinx: 16 | fail_on_warning: true 17 | configuration: docs/conf.py 18 | -------------------------------------------------------------------------------- /docs/twisted.txt: -------------------------------------------------------------------------------- 1 | Testing with Twisted 2 | ==================== 3 | 4 | Due to its longevity, Twisted has many of its own patterns for things that have since become 5 | standard in Python. One of these is logging, where it has its own logging framework. 6 | 7 | A :class:`testfixtures.twisted.LogCapture` helper is provided, but given the framework's 8 | relatively niche use now, the documentation is provided by way of the test suite: 9 | 10 | 11 | .. literalinclude:: ../testfixtures/tests/test_twisted.py 12 | -------------------------------------------------------------------------------- /testfixtures/tests/sample2.py: -------------------------------------------------------------------------------- 1 | # NB: This file is used in the documentation, if you make changes, ensure 2 | # you update the line numbers in popen.txt! 
3 | """ 4 | A sample module containing the kind of code that 5 | testfixtures helps with testing 6 | """ 7 | 8 | from testfixtures.tests.sample1 import X, z 9 | 10 | try: 11 | from guppy import hpy 12 | guppy = True 13 | except ImportError: 14 | guppy = False 15 | 16 | 17 | def dump(path): 18 | if guppy: 19 | hpy().heap().stat.dump(path) 20 | -------------------------------------------------------------------------------- /testfixtures/tests/test_generator.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | 3 | from types import GeneratorType 4 | 5 | from testfixtures import generator 6 | 7 | 8 | class TestG(TestCase): 9 | 10 | def test_example(self): 11 | g = generator(1, 2, 3) 12 | self.assertTrue(isinstance(g, GeneratorType)) 13 | self.assertEqual(tuple(g), (1, 2, 3)) 14 | 15 | def test_from_sequence(self): 16 | s = (1, 2, 3) 17 | g = generator(*s) 18 | self.assertTrue(isinstance(g, GeneratorType)) 19 | self.assertEqual(tuple(g), (1, 2, 3)) 20 | -------------------------------------------------------------------------------- /testfixtures/tests/test_django/test_shouldraise.py: -------------------------------------------------------------------------------- 1 | from django.core.exceptions import ValidationError 2 | 3 | from testfixtures import ShouldRaise 4 | from testfixtures.shouldraise import ShouldAssert 5 | 6 | 7 | class TestShouldRaiseWithValidatorErrors: 8 | 9 | def test_as_expected(self): 10 | with ShouldRaise(ValidationError("d'oh")): 11 | raise ValidationError("d'oh") 12 | 13 | def test_not_as_expected(self): 14 | message = ( 15 | 'ValidationError(["d\'oh"]) (expected) != ' 16 | 'ValidationError([\'nuts\']) (raised)' 17 | ) 18 | with ShouldAssert(message): 19 | with ShouldRaise(ValidationError("d'oh")): 20 | raise ValidationError("nuts") 21 | -------------------------------------------------------------------------------- /testfixtures/tests/directory-contents.txt: -------------------------------------------------------------------------------- 1 | Here's an example piece of code that creates some files and 2 | directories: 3 | 4 | .. code-block:: python 5 | 6 | import os 7 | 8 | def spew(path): 9 | with open(os.path.join(path, 'root.txt'), 'wb') as f: 10 | f.write(b'root output') 11 | os.mkdir(os.path.join(path, 'subdir')) 12 | with open(os.path.join(path, 'subdir', 'file.txt'), 'wb') as f: 13 | f.write(b'subdir output') 14 | os.mkdir(os.path.join(path, 'subdir', 'logs')) 15 | 16 | This function is used as follows: 17 | 18 | >>> spew(tempdir.path) 19 | 20 | This will create the following files and directories:: 21 | 22 | root.txt 23 | subdir/ 24 | subdir/file.txt 25 | subdir/logs/ 26 | 27 | .. -> expected_listing 28 | 29 | .. invisible-code-block: python 30 | 31 | # check the listing was as expected 32 | tempdir.compare(expected_listing.strip().split('\n')) 33 | -------------------------------------------------------------------------------- /docs/index.txt: -------------------------------------------------------------------------------- 1 | .. include:: ../README.rst 2 | 3 | The sections below describe the use of the various tools included: 4 | 5 | .. toctree:: 6 | :maxdepth: 1 7 | 8 | comparing.txt 9 | mocking.txt 10 | datetime.txt 11 | logging.txt 12 | streams.txt 13 | files.txt 14 | exceptions.txt 15 | warnings.txt 16 | popen.txt 17 | django.txt 18 | twisted.txt 19 | utilities.txt 20 | 21 | If you're looking for a description of a particular tool, please see 22 | the API reference: 23 | 24 | .. 
toctree:: 25 | :maxdepth: 1 26 | 27 | api.txt 28 | 29 | For details of how to install the package or get involved in its 30 | development, please see the sections below: 31 | 32 | .. toctree:: 33 | :maxdepth: 1 34 | 35 | installation.txt 36 | development.txt 37 | changes.txt 38 | license.txt 39 | 40 | Indices and tables 41 | ================== 42 | 43 | * :ref:`genindex` 44 | * :ref:`modindex` 45 | * :ref:`search` 46 | 47 | -------------------------------------------------------------------------------- /testfixtures/tests/configparser-write.txt: -------------------------------------------------------------------------------- 1 | .. invisible-code-block: python 2 | 3 | # change to the temp directory 4 | import os 5 | original_dir = os.getcwd() 6 | os.chdir(tempdir.path) 7 | 8 | To construct a configuration file using the :mod:`ConfigParser` 9 | module, you would do the following: 10 | 11 | .. code-block:: python 12 | 13 | from configparser import ConfigParser 14 | config = ConfigParser() 15 | config.add_section('A Section') 16 | config.set('A Section', 'dir', 'frob') 17 | f = open('example.cfg','w') 18 | config.write(f) 19 | f.close() 20 | 21 | The generated configuration file will be as follows: 22 | 23 | .. topic:: example.cfg 24 | :class: read-file 25 | 26 | :: 27 | 28 | [A Section] 29 | dir = frob 30 | 31 | 32 | .. config parser writes whitespace at the end, be careful when testing! 33 | 34 | .. invisible-code-block: python 35 | 36 | # change out again 37 | import os 38 | os.chdir(original_dir) 39 | 40 | -------------------------------------------------------------------------------- /testfixtures/tests/configparser-read.txt: -------------------------------------------------------------------------------- 1 | Here's an example configuration file: 2 | 3 | .. topic:: example.cfg 4 | :class: write-file 5 | 6 | :: 7 | 8 | [A Section] 9 | dir=frob 10 | long: this value continues 11 | on the next line 12 | 13 | .. invisible-code-block: python 14 | 15 | # change to the temp directory 16 | import os 17 | original_dir = os.getcwd() 18 | os.chdir(tempdir.path) 19 | 20 | To parse this file using the :mod:`ConfigParser` module, you would 21 | do the following: 22 | 23 | .. code-block:: python 24 | 25 | from configparser import ConfigParser 26 | config = ConfigParser() 27 | config.read('example.cfg') 28 | 29 | The items in the section are now available as follows: 30 | 31 | >>> for name, value in sorted(config.items('A Section')): 32 | ... print('{0!r}:{1!r}'.format(name, value)) 33 | 'dir':'frob' 34 | 'long':'this value continues\non the next line' 35 | 36 | .. invisible-code-block: python 37 | 38 | # change out again 39 | import os 40 | os.chdir(original_dir) 41 | -------------------------------------------------------------------------------- /conftest.py: -------------------------------------------------------------------------------- 1 | from doctest import REPORT_NDIFF, ELLIPSIS 2 | 3 | from sybil import Sybil 4 | from sybil.parsers.doctest import DocTestParser 5 | from sybil.parsers.codeblock import PythonCodeBlockParser 6 | from sybil.parsers.capture import parse_captures 7 | from sybil.parsers.skip import skip 8 | 9 | from testfixtures import TempDirectory 10 | from testfixtures.sybil import FileParser 11 | 12 | 13 | def sybil_setup(namespace): 14 | # _tempdir is in case it's overwritten by a test. 
15 | namespace['tempdir'] = namespace['_tempdir'] = TempDirectory() 16 | 17 | 18 | def sybil_teardown(namespace): 19 | namespace['_tempdir'].cleanup() 20 | 21 | 22 | pytest_collect_file = Sybil( 23 | parsers=[ 24 | DocTestParser(optionflags=REPORT_NDIFF|ELLIPSIS), 25 | PythonCodeBlockParser(), 26 | parse_captures, 27 | FileParser('tempdir'), 28 | skip, 29 | ], 30 | patterns=['*.txt', '*.py'], 31 | setup=sybil_setup, teardown=sybil_teardown, 32 | fixtures=['tmp_path'], 33 | exclude='testfixtures/tests/*.py' 34 | ).pytest() 35 | -------------------------------------------------------------------------------- /docs/installation.txt: -------------------------------------------------------------------------------- 1 | Installation Instructions 2 | ========================= 3 | 4 | If you want to experiment with testfixtures, the easiest way to 5 | install it is to do the following in a virtualenv: 6 | 7 | .. code-block:: bash 8 | 9 | pip install testfixtures 10 | 11 | 12 | If you are using conda, testfixtures can be installed as follows: 13 | 14 | 15 | .. code-block:: bash 16 | 17 | conda install -c conda-forge testfixtures 18 | 19 | 20 | If your package uses setuptools and you decide to use testfixtures, 21 | then you should do one of the following: 22 | 23 | - Specify ``testfixtures`` in the ``tests_require`` parameter of your 24 | package's call to ``setup`` in :file:`setup.py`. 25 | 26 | - Add an ``extra_requires`` parameter in your call to ``setup`` as 27 | follows: 28 | 29 | .. invisible-code-block: python 30 | 31 | from testfixtures.mock import Mock 32 | setup = Mock() 33 | 34 | .. code-block:: python 35 | 36 | setup( 37 | # other stuff here 38 | extras_require=dict( 39 | test=['testfixtures'], 40 | ) 41 | ) 42 | -------------------------------------------------------------------------------- /testfixtures/tests/test_diff.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | 3 | from testfixtures import diff 4 | 5 | 6 | class TestDiff(TestCase): 7 | 8 | def test_example(self): 9 | actual = diff(''' 10 | line1 11 | line2 12 | line3 13 | ''', 14 | ''' 15 | line1 16 | line changed 17 | line3 18 | ''') 19 | expected = '''\ 20 | --- first 21 | +++ second 22 | @@ -1,5 +1,5 @@ 23 | 24 | line1 25 | - line2 26 | + line changed 27 | line3 28 | ''' 29 | self.assertEqual( 30 | [line.strip() for line in expected.split("\n")], 31 | [line.strip() for line in actual.split("\n")], 32 | '\n%r\n!=\n%r' % (expected, actual) 33 | ) 34 | 35 | def test_no_newlines(self): 36 | actual = diff('x', 'y') 37 | expected = '--- first\n+++ second\n@@ -1 +1 @@\n-x\n+y' 38 | self.assertEqual( 39 | expected, 40 | actual, 41 | '\n%r\n!=\n%r' % (expected, actual) 42 | ) 43 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Copyright (c) 2008-2015 Simplistix Ltd 2 | Copyright (c) 2015 onwards Chris Withers 3 | 4 | Permission is hereby granted, free of charge, to any person 5 | obtaining a copy of this software and associated documentation 6 | files (the "Software"), to deal in the Software without restriction, 7 | including without limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of the Software, 9 | and to permit persons to whom the Software is furnished to do so, 10 | subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice 
shall be 13 | included in all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 16 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES 17 | OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 18 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 19 | BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 20 | ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 21 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | -------------------------------------------------------------------------------- /mypy.ini: -------------------------------------------------------------------------------- 1 | [mypy] 2 | disallow_untyped_defs = True 3 | disallow_incomplete_defs = True 4 | plugins = 5 | mypy_django_plugin.main, 6 | mypy_zope:plugin, 7 | 8 | [mypy.plugins.django-stubs] 9 | django_settings_module = "testfixtures.tests.test_django.settings" 10 | 11 | # "nice to have" stuff to fix: 12 | [mypy-testfixtures.tests.*] 13 | disable_error_code = no-untyped-call,no-untyped-def 14 | 15 | # Be more picky with some test files: 16 | [mypy-testfixtures.tests.test_compare_types] 17 | enable_error_code = no-untyped-call,no-untyped-def 18 | warn_unused_ignores = true 19 | check_untyped_defs = true 20 | no_implicit_reexport = true 21 | warn_return_any = true 22 | no_implicit_optional = true 23 | 24 | [mypy-testfixtures.tests.test_date] 25 | enable_error_code = no-untyped-call,no-untyped-def 26 | 27 | [mypy-testfixtures.tests.test_datetime] 28 | enable_error_code = no-untyped-call,no-untyped-def 29 | 30 | [mypy-testfixtures.tests.test_outputcapture] 31 | enable_error_code = no-untyped-call,no-untyped-def 32 | 33 | [mypy-testfixtures.tests.test_should_raise] 34 | enable_error_code = no-untyped-call,no-untyped-def 35 | 36 | [mypy-testfixtures.tests.test_time] 37 | enable_error_code = no-untyped-call,no-untyped-def 38 | 39 | # permanent exclusions and workaround: 40 | [mypy-constantly.*] 41 | ignore_missing_imports = True 42 | 43 | [mypy-guppy] 44 | # guppy isn't actually ever installed: 45 | ignore_missing_imports = True 46 | -------------------------------------------------------------------------------- /testfixtures/tests/test_stringcomparison.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | from testfixtures import StringComparison as S, compare 4 | from unittest import TestCase 5 | 6 | 7 | class Tests(TestCase): 8 | 9 | def test_equal_yes(self): 10 | self.assertTrue('on 40220' == S(r'on \d+')) 11 | 12 | def test_equal_no(self): 13 | self.assertFalse('on xxx' == S(r'on \d+')) 14 | 15 | def test_not_equal_yes(self): 16 | self.assertFalse('on 40220' != S(r'on \d+')) 17 | 18 | def test_not_equal_no(self): 19 | self.assertTrue('on xxx' != S(r'on \d+')) 20 | 21 | def test_comp_in_sequence(self): 22 | self.assertTrue((1, 2, 'on 40220') == (1, 2, S(r'on \d+'))) 23 | 24 | def test_not_string(self): 25 | self.assertFalse(40220 == S(r'on \d+')) 26 | 27 | def test_repr(self): 28 | compare('', repr(S(r'on \d+'))) 29 | 30 | def test_str(self): 31 | compare('', str(S(r'on \d+'))) 32 | 33 | def test_sort(self): 34 | a = S('a') 35 | b = S('b') 36 | c = S('c') 37 | compare(sorted(('d', c, 'e', a, 'a1', b)), expected=[a, 'a1', b, c, 'd', 'e']) 38 | 39 | def test_flags_argument(self): 40 | compare(S(".*bar", re.DOTALL), actual="foo\nbar") 41 | 42 | def test_flags_parameter(self): 43 | compare(S(".*bar", 
flags=re.DOTALL), actual="foo\nbar") 44 | 45 | def test_flags_names(self): 46 | compare(S(".*BaR", dotall=True, ignorecase=True), actual="foo\nbar") 47 | -------------------------------------------------------------------------------- /testfixtures/mock.py: -------------------------------------------------------------------------------- 1 | """ 2 | A facade for either :mod:`unittest.mock` or its `rolling backport`__, if it is 3 | installed, with a preference for the latter as it may well have newer functionality 4 | and bugfixes. 5 | 6 | The facade also contains any bugfixes that are critical to the operation of 7 | functionality provided by testfixtures. 8 | 9 | __ https://mock.readthedocs.io 10 | """ 11 | import sys 12 | 13 | backport_version: tuple[int, int, int] | None 14 | 15 | try: 16 | from mock import * 17 | from mock.mock import _Call, _Sentinel 18 | from mock.mock import call as mock_call 19 | from mock import version_info as backport_version 20 | except ImportError: 21 | backport_version = None 22 | class MockCall: 23 | pass 24 | mock_call = MockCall() # type: ignore[assignment] 25 | from unittest.mock import * # type: ignore[assignment] 26 | from unittest.mock import _Call, _Sentinel # type: ignore[assignment] 27 | 28 | 29 | has_backport = backport_version is not None 30 | 31 | if not ( 32 | (has_backport and backport_version[:3] > (2, 0, 0)) or # type: ignore[index] 33 | (sys.version_info < (3, 0, 0) and not has_backport) or 34 | (3, 6, 7) < sys.version_info[:3] < (3, 7, 0) or 35 | sys.version_info[:3] > (3, 7, 1) 36 | ): # pragma: no cover 37 | raise ImportError('Please upgrade Python (you have {}) or Mock Backport (You have {})'.format( 38 | sys.version_info, backport_version 39 | )) 40 | parent_name = '_mock_parent' 41 | -------------------------------------------------------------------------------- /testfixtures/tests/sample1.py: -------------------------------------------------------------------------------- 1 | # NB: This file is used in the documentation, if you make changes, ensure 2 | # you update the line numbers in popen.txt! 
3 | """ 4 | A sample module containing the kind of code that 5 | testfixtures helps with testing 6 | """ 7 | 8 | from datetime import datetime, date 9 | 10 | 11 | def str_now_1(): 12 | return str(datetime.now()) 13 | 14 | now = datetime.now 15 | 16 | 17 | def str_now_2(): 18 | return str(now()) 19 | 20 | 21 | def str_today_1(): 22 | return str(date.today()) 23 | 24 | today = date.today 25 | 26 | 27 | def str_today_2(): 28 | return str(today()) 29 | 30 | from time import time 31 | 32 | 33 | def str_time(): 34 | return str(time()) 35 | 36 | 37 | class X: 38 | 39 | def y(self): 40 | return "original y" 41 | 42 | @classmethod 43 | def aMethod(cls): 44 | return cls 45 | 46 | @staticmethod 47 | def bMethod(): 48 | return 2 49 | 50 | 51 | def z(): 52 | return "original z" 53 | 54 | 55 | class SampleClassA: 56 | def __init__(self, *args): 57 | self.args = args 58 | 59 | 60 | class SampleClassB(SampleClassA): 61 | pass 62 | 63 | 64 | def a_function(): 65 | return (SampleClassA(1), SampleClassB(2), SampleClassA(3)) 66 | 67 | some_dict = dict( 68 | key='value', 69 | complex_key=[1, 2, 3], 70 | ) 71 | 72 | 73 | class Slotted: 74 | 75 | __slots__ = ['x', 'y'] 76 | 77 | def __init__(self, x, y): 78 | self.x = x 79 | self.y = y 80 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2008-2014 Simplistix Ltd, 2015 onwards Chris Withers 2 | # See license.txt for license details. 3 | 4 | import os 5 | 6 | from setuptools import setup, find_packages 7 | 8 | name = 'testfixtures' 9 | base_dir = os.path.dirname(__file__) 10 | 11 | optional = [ 12 | 'django', 13 | 'sybil>=6', 14 | 'twisted' 15 | ] 16 | 17 | setup( 18 | name=name, 19 | version=open(os.path.join(base_dir, name, 'version.txt')).read().strip(), 20 | author='Chris Withers', 21 | author_email='chris@simplistix.co.uk', 22 | license='MIT', 23 | description=("A collection of helpers and mock objects " 24 | "for unit tests and doc tests."), 25 | long_description=open(os.path.join(base_dir, 'README.rst')).read(), 26 | url='https://github.com/Simplistix/testfixtures', 27 | classifiers=[ 28 | 'Development Status :: 5 - Production/Stable', 29 | 'Intended Audience :: Developers', 30 | 'Programming Language :: Python :: 3', 31 | ], 32 | packages=find_packages(), 33 | zip_safe=False, 34 | package_data={'testfixtures': ['py.typed', 'version.txt']}, 35 | include_package_data=True, 36 | python_requires=">=3.11", 37 | extras_require=dict( 38 | test=['django-stubs', 39 | 'mypy>=1.19', 40 | 'mypy-zope', 41 | 'pytest>=7.1', 42 | 'pytest-cov', 43 | 'pytest-django', 44 | 'types-mock', 45 | ]+optional, 46 | docs=['sphinx', 'furo']+optional, 47 | build=['setuptools-git', 'wheel', 'twine'] 48 | ) 49 | ) 50 | -------------------------------------------------------------------------------- /docs/development.txt: -------------------------------------------------------------------------------- 1 | Development 2 | =========== 3 | 4 | .. highlight:: bash 5 | 6 | If you wish to contribute to this project, then you should fork the 7 | repository found here: 8 | 9 | https://github.com/simplistix/testfixtures/ 10 | 11 | Once that has been done and you have a checkout, 12 | you can follow the instructions below to perform various development tasks. 13 | 14 | For detailed development guidelines, code style requirements, and additional commands, 15 | see ``AGENTS.md`` in the repository root. 
16 | 17 | Setting up a virtualenv 18 | ----------------------- 19 | 20 | The recommended way to set up a development environment is to create 21 | a virtualenv and then install the package in editable form as follows: 22 | 23 | .. code-block:: bash 24 | 25 | python3 -m venv .venv 26 | source .venv/bin/activate 27 | pip install -U pip setuptools 28 | pip install -U -e .[test,build] 29 | 30 | Running the tests 31 | ----------------- 32 | 33 | Once you've set up a virtualenv, the tests can be run in the activated 34 | virtualenv and from the root of a source checkout as follows: 35 | 36 | .. code-block:: bash 37 | 38 | pytest 39 | 40 | Building the documentation 41 | -------------------------- 42 | 43 | The Sphinx documentation is built by doing the following from the 44 | directory containing ``setup.py``: 45 | 46 | .. code-block:: bash 47 | 48 | cd docs 49 | make html 50 | 51 | To check that the description that will be used on PyPI renders properly, 52 | do the following: 53 | 54 | .. code-block:: bash 55 | 56 | python setup.py --long-description | rst2html.py > desc.html 57 | 58 | The resulting ``desc.html`` should be checked by opening in a browser. 59 | 60 | Making a release 61 | ---------------- 62 | 63 | To make a release, just update ``version.txt``, update the change log 64 | and push to https://github.com/simplistix/testfixtures. 65 | Carthorse should take care of the rest. 66 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | Testfixtures 2 | ============ 3 | 4 | |Docs|_ |PyPI|_ |Git|_ |CircleCI|_ 5 | 6 | .. |Docs| image:: https://readthedocs.org/projects/testfixtures/badge/?version=latest 7 | .. _Docs: http://testfixtures.readthedocs.org/en/latest/ 8 | 9 | .. |PyPI| image:: https://badge.fury.io/py/testfixtures.svg 10 | .. _PyPI: https://pypi.org/project/testfixtures/ 11 | 12 | .. |Git| image:: https://badge.fury.io/gh/simplistix%2Ftestfixtures.svg 13 | .. _Git: https://github.com/simplistix/testfixtures 14 | 15 | .. |CircleCI| image:: https://circleci.com/gh/simplistix/testfixtures/tree/master.svg?style=shield 16 | .. _CircleCI: https://circleci.com/gh/simplistix/testfixtures/tree/master 17 | 18 | Testfixtures is a collection of helpers and mock objects that are useful when 19 | writing automated tests in Python. 20 | 21 | The areas of testing this package can help with are listed below: 22 | 23 | **Comparing objects and sequences** 24 | 25 | Better feedback when the results aren't as you expected along with 26 | support for comparison of objects that don't normally support 27 | comparison and comparison of deeply nested datastructures. 28 | 29 | **Mocking out objects and methods** 30 | 31 | Easy to use ways of stubbing out objects, classes or individual 32 | methods. Specialised helpers and mock objects are provided, including sub-processes, 33 | dates and times. 34 | 35 | **Testing logging** 36 | 37 | Helpers for capturing logging and checking what has been logged is what was expected. 38 | 39 | **Testing stream output** 40 | 41 | Helpers for capturing stream output, such as that from print function calls or even 42 | stuff written directly to file descriptors, and making assertions about it. 43 | 44 | **Testing with files and directories** 45 | 46 | Support for creating and checking both files and directories in sandboxes 47 | including support for other common path libraries. 
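A minimal sketch of how the sandboxing and comparison helpers described above fit together; ``TempDirectory`` and ``compare`` are names exported by ``testfixtures``, while the file path and contents here are purely illustrative:

.. code-block:: python

    from testfixtures import TempDirectory, compare

    # create a sandbox, write a file into it, then check what ended up on disk
    with TempDirectory() as d:
        d.write('some/folder/result.txt', b'expected output')
        compare(d.read('some/folder/result.txt'), expected=b'expected output')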
48 | 49 | **Testing exceptions and warnings** 50 | 51 | Easy to use ways of checking that a certain exception is raised, 52 | or a warning is issued, even down to the parameters provided. 53 | 54 | **Testing when using django** 55 | 56 | Helpers for comparing instances of django models. 57 | 58 | **Testing when using Twisted** 59 | 60 | Helpers for making assertions about logging when using Twisted. 61 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import datetime 3 | import os 4 | import time 5 | 6 | import pkg_resources 7 | 8 | on_rtd = os.environ.get('READTHEDOCS', None) == 'True' 9 | build_date = datetime.datetime.utcfromtimestamp(int(os.environ.get('SOURCE_DATE_EPOCH', time.time()))) 10 | 11 | extensions = [ 12 | 'sphinx.ext.autodoc', 13 | 'sphinx.ext.intersphinx' 14 | ] 15 | 16 | intersphinx_mapping = { 17 | 'python': ('https://docs.python.org/3/', None), 18 | 'django': ('https://django.readthedocs.io/en/latest/', None), 19 | 'pytest': ('https://docs.pytest.org/en/latest/', None), 20 | 'sybil': ('https://sybil.readthedocs.io/en/latest/', None), 21 | } 22 | 23 | # General 24 | source_suffix = '.txt' 25 | master_doc = 'index' 26 | project = 'testfixtures' 27 | copyright = '2008-2015 Simplistix Ltd, 2016-%s Chris Withers' % build_date.year 28 | version = release = pkg_resources.get_distribution(project).version 29 | exclude_trees = ['_build'] 30 | pygments_style = 'sphinx' 31 | 32 | # Options for HTML output 33 | html_theme = 'furo' 34 | htmlhelp_basename = project+'doc' 35 | 36 | # Options for LaTeX output 37 | latex_documents = [ 38 | ('index', project+'.tex', project+u' Documentation', 39 | 'Simplistix Ltd', 'manual'), 40 | ] 41 | 42 | exclude_patterns = ['**/furo.js.LICENSE.txt'] 43 | 44 | nitpicky = True 45 | nitpick_ignore = [ 46 | ('py:class', 'P'), # param spec 47 | ('py:class', 'constantly._constants.NamedConstant'), # twisted logging constants 48 | ('py:class', 'django.db.models.base.Model'), # not documented upstream 49 | ('py:class', 'module'), # ModuleType not documented. 50 | ('py:class', 'tempfile.TemporaryFile'), # not documented as a class so type annotation broken 51 | ('py:class', 'testfixtures.comparison.S'), # type var 52 | ('py:class', 'testfixtures.comparison.S_'), # type var 53 | ('py:class', 'testfixtures.comparison.T'), # type var 54 | ('py:class', 'testfixtures.datetime.MockedCurrent'), # internal class that shouldn't be doc'ed 55 | ('py:class', 'testfixtures.replace.R'), # type var 56 | ('py:class', 'testfixtures.shouldraise.E'), # type var 57 | ('py:class', 'testfixtures.utils.T'), # type var 58 | ('py:class', 'testfixtures.utils.U'), # type var 59 | ('py:class', 'twisted.trial.unittest.TestCase'), # twisted doesn't use sphinx 60 | ('py:class', 'unittest.case.TestCase'), # no docs, apparently 61 | ('py:class', 'unittest.mock._Call'),  # No docstring.
62 | ] 63 | -------------------------------------------------------------------------------- /testfixtures/__init__.py: -------------------------------------------------------------------------------- 1 | class singleton: 2 | 3 | def __init__(self, name: str) -> None: 4 | self.name = name 5 | 6 | def __repr__(self) -> str: 7 | return '<%s>' % self.name 8 | 9 | __str__ = __repr__ 10 | 11 | 12 | not_there: singleton = singleton('not_there') 13 | 14 | 15 | from testfixtures.comparison import ( 16 | Comparison, StringComparison, RoundComparison, compare, diff, RangeComparison, 17 | SequenceComparison, Subset, Permutation, MappingComparison, like, sequence, 18 | contains, unordered 19 | ) 20 | from testfixtures.datetime import mock_datetime, mock_date, mock_time 21 | from testfixtures.logcapture import LogCapture, log_capture 22 | from testfixtures.outputcapture import OutputCapture 23 | from testfixtures.resolve import resolve 24 | from testfixtures.replace import ( 25 | Replacer, 26 | Replace, 27 | replace, 28 | replace_in_environ, 29 | replace_on_class, 30 | replace_in_module, 31 | ) 32 | from testfixtures.shouldraise import ShouldRaise, should_raise, ShouldAssert 33 | from testfixtures.shouldwarn import ShouldWarn, ShouldNotWarn 34 | from testfixtures.tempdirectory import TempDirectory, tempdir 35 | from testfixtures.utils import wrap, generator 36 | 37 | 38 | # backwards compatibility for the old names 39 | test_datetime = mock_datetime 40 | test_datetime.__test__ = False # type: ignore[attr-defined] 41 | test_date = mock_date 42 | test_date.__test__ = False # type: ignore[attr-defined] 43 | test_time = mock_time 44 | test_time.__test__ = False # type: ignore[attr-defined] 45 | 46 | __all__ = [ 47 | 'Comparison', 48 | 'LogCapture', 49 | 'MappingComparison', 50 | 'OutputCapture', 51 | 'Permutation', 52 | 'RangeComparison', 53 | 'Replace', 54 | 'Replacer', 55 | 'RoundComparison', 56 | 'SequenceComparison', 57 | 'ShouldAssert', 58 | 'ShouldRaise', 59 | 'ShouldNotWarn', 60 | 'ShouldWarn', 61 | 'Subset', 62 | 'StringComparison', 63 | 'TempDirectory', 64 | 'compare', 65 | 'contains', 66 | 'diff', 67 | 'generator', 68 | 'like', 69 | 'log_capture', 70 | 'mock_date', 71 | 'mock_datetime', 72 | 'mock_time', 73 | 'not_there', 74 | 'replace', 75 | 'replace_in_environ', 76 | 'replace_on_class', 77 | 'replace_in_module', 78 | 'resolve', 79 | 'sequence', 80 | 'should_raise', 81 | 'singleton', 82 | 'tempdir', 83 | 'test_date', 84 | 'test_datetime', 85 | 'test_time', 86 | 'unordered', 87 | 'wrap', 88 | ] 89 | -------------------------------------------------------------------------------- /testfixtures/resolve.py: -------------------------------------------------------------------------------- 1 | from operator import setitem 2 | from typing import Any, Callable, Tuple, TypeAlias, Literal 3 | 4 | from testfixtures import not_there 5 | 6 | 7 | Setter: TypeAlias = Callable[[object, str, Any], None] | Callable[[Any, int, Any], None] | None 8 | Key: TypeAlias = Tuple[int, Setter, str | int | None] 9 | 10 | class Resolved: 11 | 12 | def __init__(self, container: Any, setter: Setter, name: Any, found: Any): 13 | self.container: Any = container 14 | self.setter = setter 15 | self.name = name 16 | self.found: Any = found 17 | 18 | def key(self) -> Key: 19 | return id(self.container), self.setter, self.name 20 | 21 | 22 | def resolve(dotted_name: str, container: Any | None = None, sep: str = '.') -> Resolved: 23 | names = dotted_name.split(sep) 24 | used = names.pop(0) 25 | found: Any 26 | if container is None: 27 
| found = __import__(used) 28 | container = found 29 | else: 30 | assert not used, 'Absolute traversal not allowed when container supplied' 31 | used = '' 32 | found = container 33 | setter: Setter = None 34 | name: Any = None 35 | for name in names: 36 | container = found 37 | used += '.' + name 38 | try: 39 | found = getattr(found, name) 40 | setter = setattr 41 | except AttributeError: 42 | try: 43 | if sep != '.': 44 | raise ImportError 45 | __import__(used) 46 | except ImportError: 47 | setter = setitem 48 | try: 49 | found = found[name] 50 | except KeyError: 51 | found = not_there 52 | except TypeError: 53 | try: 54 | name = int(name) 55 | except ValueError: 56 | setter = setattr 57 | found = not_there 58 | else: 59 | found = found[name] 60 | else: 61 | found = getattr(found, name) 62 | setter = getattr 63 | if found is not_there: 64 | break 65 | return Resolved(container, setter, name, found) 66 | 67 | 68 | class _Reference: 69 | 70 | @classmethod 71 | def classmethod(cls) -> None: # pragma: no cover 72 | pass 73 | 74 | @staticmethod 75 | def staticmethod() -> None: # pragma: no cover 76 | pass 77 | 78 | 79 | class_type = type(_Reference) 80 | classmethod_type = type(_Reference.classmethod) 81 | -------------------------------------------------------------------------------- /testfixtures/tests/test_mock.py: -------------------------------------------------------------------------------- 1 | from testfixtures.mock import Mock, call, ANY 2 | 3 | from .test_compare import CompareHelper 4 | 5 | class TestCall(CompareHelper): 6 | 7 | def test_non_root_call_not_equal(self): 8 | self.check_raises( 9 | call.foo().bar(), 10 | call.baz().bar(), 11 | '\n' 12 | "'call.foo().bar()'\n" 13 | '!=\n' 14 | "'call.baz().bar()'" 15 | ) 16 | 17 | def test_non_root_attr_not_equal(self): 18 | self.check_raises( 19 | call.foo.bar(), 20 | call.baz.bar(), 21 | '\n' 22 | "'call.foo.bar()'\n" 23 | '!=\n' 24 | "'call.baz.bar()'" 25 | ) 26 | 27 | def test_non_root_params_not_equal(self): 28 | self.check_raises( 29 | call.foo(x=1).bar(), 30 | call.foo(x=2).bar(), 31 | '\n' 32 | "'call.foo(x=1)'\n" 33 | '!=\n' 34 | "'call.foo(x=2)'" 35 | ) 36 | 37 | def test_any(self): 38 | assert call == ANY 39 | 40 | def test_no_len(self): 41 | assert not call == object() 42 | 43 | def test_two_elements(self): 44 | m = Mock() 45 | m(x=1) 46 | assert m.call_args == ((), {'x': 1}) 47 | 48 | def test_other_empty(self): 49 | assert call == () 50 | 51 | def test_other_single(self): 52 | assert call == ((),) 53 | assert call == ({},) 54 | assert call == ('',) 55 | 56 | def test_other_double(self): 57 | assert call == ('', (),) 58 | assert call == ('', {},) 59 | 60 | def test_other_quad(self): 61 | assert not call == (1, 2, 3, 4) 62 | 63 | 64 | class TestMock(CompareHelper): 65 | 66 | def test_non_root_call_not_equal(self): 67 | m = Mock() 68 | m.foo().bar() 69 | self.check_raises( 70 | m.mock_calls[-1], 71 | call.baz().bar(), 72 | '\n' 73 | "'call.foo().bar()'\n" 74 | '!=\n' 75 | "'call.baz().bar()'" 76 | ) 77 | 78 | def test_non_root_attr_not_equal(self): 79 | m = Mock() 80 | m.foo.bar() 81 | self.check_raises( 82 | m.mock_calls[-1], 83 | call.baz.bar(), 84 | '\n' 85 | "'call.foo.bar()'\n" 86 | '!=\n' 87 | "'call.baz.bar()'" 88 | ) 89 | 90 | def test_non_root_params_not_equal(self): 91 | m = Mock() 92 | m.foo(x=1).bar() 93 | # surprising and annoying (and practically unsolvable :-/): 94 | assert m.mock_calls[-1] == call.foo(y=2).bar() 95 | -------------------------------------------------------------------------------- 
/testfixtures/sybil.py: -------------------------------------------------------------------------------- 1 | import os 2 | import textwrap 3 | from dataclasses import dataclass 4 | from typing import Iterable 5 | 6 | from sybil import Document, Region, Example 7 | from sybil.parsers.rest.lexers import DirectiveLexer 8 | 9 | from testfixtures import diff 10 | 11 | 12 | @dataclass 13 | class FileBlock: 14 | path: str 15 | content: str 16 | action: str 17 | 18 | 19 | class FileParser: 20 | """ 21 | A `Sybil `__ parser that 22 | parses certain ReST sections to read and write files in the 23 | configured :class:`~testfixtures.TempDirectory`. 24 | 25 | :param name: This is the name of the :class:`~testfixtures.TempDirectory` to use 26 | in the Sybil test namespace. 27 | """ 28 | 29 | def __init__(self, name: str): 30 | self.name = name 31 | self.lexer = DirectiveLexer('topic', arguments='.+') 32 | 33 | def __call__(self, document: Document) -> Iterable[Region]: 34 | for region in self.lexer(document): 35 | options = region.lexemes.get('options') 36 | if options is not None: 37 | class_ = options.get('class') 38 | if class_ in ('read-file', 'write-file'): 39 | lines = region.lexemes['source'].splitlines(keepends=True) 40 | index = 0 41 | if lines[index].strip() == '::': 42 | index += 1 43 | source = textwrap.dedent(''.join(lines[index:])).lstrip() 44 | if source[-1] != '\n': 45 | source += '\n' 46 | region.parsed = FileBlock( 47 | path=region.lexemes['arguments'], 48 | content=source, 49 | action=class_.split('-')[0] 50 | ) 51 | region.evaluator = self.evaluate 52 | yield region 53 | 54 | def evaluate(self, example: Example) -> str | None: 55 | block: FileBlock = example.parsed 56 | temp_directory = example.namespace[self.name] 57 | if block.action == 'read': 58 | actual = temp_directory.as_path(block.path).read_text().replace(os.linesep, '\n') 59 | if actual != block.content: 60 | return diff( 61 | block.content, 62 | actual, 63 | 'File %r, line %i:' % (example.path, example.line), 64 | 'Reading from "%s":' % temp_directory.as_string(block.path) 65 | ) 66 | if block.action == 'write': 67 | temp_directory.write(block.path, block.content) 68 | return None 69 | -------------------------------------------------------------------------------- /docs/django.txt: -------------------------------------------------------------------------------- 1 | Testing with Django 2 | =================== 3 | 4 | Django's ORM has an unfortunate implementation choice of considering 5 | :class:`~django.db.models.Model` instances to be identical as long as their 6 | primary keys are the same: 7 | 8 | >>> from testfixtures.tests.test_django.models import SampleModel 9 | >>> SampleModel(id=1, value=1) == SampleModel(id=1, value=2) 10 | True 11 | 12 | To work around this, :mod:`testfixtures.django` :ref:`registers ` 13 | a :func:`comparer ` for the django 14 | :class:`~django.db.models.Model` class. However, for this to work, 15 | ``ignore_eq=True`` must be passed: 16 | 17 | >>> from testfixtures import compare 18 | >>> import testfixtures.django # to register the comparer... 19 | >>> compare(SampleModel(id=1, value=1), SampleModel(id=1, value=2), 20 | ... ignore_eq=True) 21 | Traceback (most recent call last): 22 | ... 
23 | AssertionError: SampleModel not as expected: 24 | 25 | same: 26 | ['id'] 27 | 28 | values differ: 29 | 'value': 1 != 2 30 | 31 | Since the above can quickly become cumbersome, a django-specific version 32 | of :func:`~testfixtures.compare` that ignores ``__eq__`` by default is provided: 33 | 34 | >>> from testfixtures.django import compare as django_compare 35 | >>> django_compare(SampleModel(id=1, value=1), SampleModel(id=1, value=2)) 36 | Traceback (most recent call last): 37 | ... 38 | AssertionError: SampleModel not as expected: 39 | 40 | same: 41 | ['id'] 42 | 43 | values differ: 44 | 'value': 1 != 2 45 | 46 | Ignoring fields 47 | --------------- 48 | 49 | It may also be that you want to ignore fields over which you have no control 50 | and cannot easily mock, such as created or modified times. For this, you 51 | can use the `ignore_fields` option: 52 | 53 | >>> compare(SampleModel(id=1, value=1), SampleModel(id=1, value=2), 54 | ... ignore_eq=True, ignore_fields=['value']) 55 | 56 | 57 | Comparing non-editable fields 58 | ----------------------------- 59 | 60 | By default, non-editable fields are ignored: 61 | 62 | >>> django_compare(SampleModel(not_editable=1), SampleModel(not_editable=2)) 63 | 64 | If you wish to include these fields in the comparison, pass the 65 | ``non_editable_fields`` option: 66 | 67 | >>> django_compare(SampleModel(not_editable=1), SampleModel(not_editable=2), 68 | ... non_editable_fields=True) 69 | Traceback (most recent call last): 70 | ... 71 | AssertionError: SampleModel not as expected: 72 | 73 | same: 74 | ['created', 'id', 'value'] 75 | 76 | values differ: 77 | 'not_editable': 1 != 2 78 | 79 | 80 | .. note:: 81 | 82 | The registered comparer currently ignores 83 | :class:`many to many ` fields. 84 | Patches to fix this deficiency are welcome! 85 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD ?= sphinx-build 7 | PAPER = 8 | 9 | # Internal variables. 10 | PAPEROPT_a4 = -D latex_paper_size=a4 11 | PAPEROPT_letter = -D latex_paper_size=letter 12 | ALLSPHINXOPTS = -d _build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 13 | 14 | .PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest 15 | 16 | help: 17 | @echo "Please use \`make ' where is one of" 18 | @echo " html to make standalone HTML files" 19 | @echo " dirhtml to make HTML files named index.html in directories" 20 | @echo " pickle to make pickle files" 21 | @echo " json to make JSON files" 22 | @echo " htmlhelp to make HTML files and a HTML help project" 23 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 24 | @echo " changes to make an overview of all changed/added/deprecated items" 25 | @echo " linkcheck to check all external links for integrity" 26 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 27 | 28 | clean: 29 | -rm -rf _build/* 30 | 31 | html: 32 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) _build/html 33 | @echo 34 | @echo "Build finished. The HTML pages are in _build/html." 35 | 36 | dirhtml: 37 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) _build/dirhtml 38 | @echo 39 | @echo "Build finished. The HTML pages are in _build/dirhtml." 
40 | 41 | pickle: 42 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) _build/pickle 43 | @echo 44 | @echo "Build finished; now you can process the pickle files." 45 | 46 | json: 47 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) _build/json 48 | @echo 49 | @echo "Build finished; now you can process the JSON files." 50 | 51 | htmlhelp: 52 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) _build/htmlhelp 53 | @echo 54 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 55 | ".hhp project file in _build/htmlhelp." 56 | 57 | latex: 58 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) _build/latex 59 | @echo 60 | @echo "Build finished; the LaTeX files are in _build/latex." 61 | @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ 62 | "run these through (pdf)latex." 63 | 64 | changes: 65 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) _build/changes 66 | @echo 67 | @echo "The overview file is in _build/changes." 68 | 69 | linkcheck: 70 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) _build/linkcheck 71 | @echo 72 | @echo "Link check complete; look for any errors in the above output " \ 73 | "or in _build/linkcheck/output.txt." 74 | 75 | doctest: 76 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) _build/doctest 77 | @echo "Testing of doctests in the sources finished, look at the " \ 78 | "results in _build/doctest/output.txt." 79 | -------------------------------------------------------------------------------- /testfixtures/rmtree.py: -------------------------------------------------------------------------------- 1 | # lamosity needed to make things reliable on Windows :-( 2 | # (borrowed from Python's test_support.py) 3 | import errno 4 | import os 5 | import shutil 6 | import sys 7 | import time 8 | import warnings 9 | from pathlib import Path 10 | 11 | if sys.platform.startswith("win"): # pragma: no cover 12 | def _waitfor(func, pathname, waitall=False): 13 | # Perform the operation 14 | func(pathname) 15 | # Now setup the wait loop 16 | if waitall: 17 | dirname = pathname 18 | else: 19 | dirname, name = os.path.split(pathname) 20 | dirname = dirname or '.' 21 | # Check for `pathname` to be removed from the filesystem. 22 | # The exponential backoff of the timeout amounts to a total 23 | # of ~1 second after which the deletion is probably an error 24 | # anyway. 25 | # Testing on a i7@4.3GHz shows that usually only 1 iteration is 26 | # required when contention occurs. 27 | timeout = 0.001 28 | while timeout < 1.0: # pragma: no branch 29 | # Note we are only testing for the existence of the file(s) in 30 | # the contents of the directory regardless of any security or 31 | # access rights. If we have made it this far, we have sufficient 32 | # permissions to do that much using Python's equivalent of the 33 | # Windows API FindFirstFile. 34 | # Other Windows APIs can fail or give incorrect results when 35 | # dealing with files that are pending deletion. 
36 | L = os.listdir(dirname) 37 | if not (L if waitall else name in L): # pragma: no branch 38 | return 39 | # Increase the timeout and try again 40 | time.sleep(timeout) # pragma: no cover 41 | timeout *= 2 # pragma: no cover 42 | warnings.warn('tests may fail, delete still pending for ' 43 | + pathname, # pragma: no cover 44 | RuntimeWarning, stacklevel=4) 45 | 46 | def _rmtree(path): 47 | def _rmtree_inner(path): 48 | for name in os.listdir(path): 49 | fullname = os.path.join(path, name) 50 | if os.path.isdir(fullname): 51 | _waitfor(_rmtree_inner, fullname, waitall=True) 52 | os.rmdir(fullname) 53 | else: 54 | os.unlink(fullname) 55 | _waitfor(_rmtree_inner, path, waitall=True) 56 | _waitfor(os.rmdir, path) 57 | else: 58 | _rmtree = shutil.rmtree 59 | 60 | 61 | def rmtree(path: str | Path) -> None: 62 | try: 63 | _rmtree(path) 64 | except OSError as e: # pragma: no cover 65 | # Unix returns ENOENT, Windows returns ESRCH. 66 | if e.errno not in (errno.ENOENT, errno.ESRCH): # pragma: no branch 67 | raise 68 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | REM Command file for Sphinx documentation 4 | 5 | set SPHINXBUILD=..\bin\sphinx-build 6 | set ALLSPHINXOPTS=-d _build/doctrees %SPHINXOPTS% . 7 | if NOT "%PAPER%" == "" ( 8 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% 9 | ) 10 | 11 | if "%1" == "" goto help 12 | 13 | if "%1" == "help" ( 14 | :help 15 | echo.Please use `make ^` where ^ is one of 16 | echo. html to make standalone HTML files 17 | echo. dirhtml to make HTML files named index.html in directories 18 | echo. pickle to make pickle files 19 | echo. json to make JSON files 20 | echo. htmlhelp to make HTML files and a HTML help project 21 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter 22 | echo. changes to make an overview over all changed/added/deprecated items 23 | echo. linkcheck to check all external links for integrity 24 | echo. doctest to run all doctests embedded in the documentation if enabled 25 | goto end 26 | ) 27 | 28 | if "%1" == "clean" ( 29 | for /d %%i in (_build\*) do rmdir /q /s %%i 30 | del /q /s _build\* 31 | goto end 32 | ) 33 | 34 | if "%1" == "html" ( 35 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% _build/html 36 | echo. 37 | echo.Build finished. The HTML pages are in _build/html. 38 | goto end 39 | ) 40 | 41 | if "%1" == "dirhtml" ( 42 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% _build/dirhtml 43 | echo. 44 | echo.Build finished. The HTML pages are in _build/dirhtml. 45 | goto end 46 | ) 47 | 48 | if "%1" == "pickle" ( 49 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% _build/pickle 50 | echo. 51 | echo.Build finished; now you can process the pickle files. 52 | goto end 53 | ) 54 | 55 | if "%1" == "json" ( 56 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% _build/json 57 | echo. 58 | echo.Build finished; now you can process the JSON files. 59 | goto end 60 | ) 61 | 62 | if "%1" == "htmlhelp" ( 63 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% _build/htmlhelp 64 | echo. 65 | echo.Build finished; now you can run HTML Help Workshop with the ^ 66 | .hhp project file in _build/htmlhelp. 67 | goto end 68 | ) 69 | 70 | if "%1" == "latex" ( 71 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% _build/latex 72 | echo. 73 | echo.Build finished; the LaTeX files are in _build/latex. 74 | goto end 75 | ) 76 | 77 | if "%1" == "changes" ( 78 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% _build/changes 79 | echo. 
80 | echo.The overview file is in _build/changes. 81 | goto end 82 | ) 83 | 84 | if "%1" == "linkcheck" ( 85 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% _build/linkcheck 86 | echo. 87 | echo.Link check complete; look for any errors in the above output ^ 88 | or in _build/linkcheck/output.txt. 89 | goto end 90 | ) 91 | 92 | if "%1" == "doctest" ( 93 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% _build/doctest 94 | echo. 95 | echo.Testing of doctests in the sources finished, look at the ^ 96 | results in _build/doctest/output.txt. 97 | goto end 98 | ) 99 | 100 | :end 101 | -------------------------------------------------------------------------------- /docs/streams.txt: -------------------------------------------------------------------------------- 1 | Testing output to streams 2 | ========================= 3 | 4 | .. currentmodule:: testfixtures 5 | 6 | In many situations, it's perfectly legitimate for output to be printed 7 | to one of the standard streams. To aid with testing this kind of 8 | output, testfixtures provides the :class:`OutputCapture` helper. 9 | 10 | This helper is a context manager that captures output sent to 11 | ``sys.stdout`` and ``sys.stderr`` and provides a 12 | :meth:`~OutputCapture.compare` method to check that the output was as 13 | expected. 14 | 15 | Here's a simple example: 16 | 17 | .. code-block:: python 18 | 19 | from testfixtures import OutputCapture 20 | import sys 21 | 22 | with OutputCapture() as output: 23 | # code under test 24 | print("Hello!") 25 | print("Something bad happened!", file=sys.stderr) 26 | 27 | output.compare('\n'.join([ 28 | "Hello!", 29 | "Something bad happened!", 30 | ])) 31 | 32 | To make life easier, both the actual and expected output are stripped 33 | of leading and trailing whitespace before the comparison is done: 34 | 35 | >>> with OutputCapture() as o: 36 | ... print(' Bar! ') 37 | ... o.compare(' Foo! ') 38 | Traceback (most recent call last): 39 | ... 40 | AssertionError: 'Foo!' (expected) != 'Bar!' (actual) 41 | 42 | However, if you need to make very explicit assertions about what has 43 | been written to the stream then you can do so using the `captured` 44 | property of the :class:`OutputCapture`: 45 | 46 | >>> with OutputCapture() as o: 47 | ... print(' Bar! ') 48 | >>> print(repr(o.captured)) 49 | ' Bar! \n' 50 | 51 | If you need to explicitly check whether output went to ``stdout`` or ``stderr``, 52 | `separate` mode can be used: 53 | 54 | .. code-block:: python 55 | 56 | from testfixtures import OutputCapture 57 | import sys 58 | 59 | with OutputCapture(separate=True) as output: 60 | print("Hello!") 61 | print("Something bad happened!", file=sys.stderr) 62 | 63 | output.compare( 64 | stdout="Hello!", 65 | stderr="Something bad happened!", 66 | ) 67 | 68 | Finally, you may sometimes want to disable an :class:`OutputCapture` 69 | without removing it from your code. This often happens when you want 70 | to insert a :any:`breakpoint` call while an :class:`OutputCapture` is active; 71 | if it remains enabled, all debugger output will be captured making the 72 | debugger very difficult to use! 73 | 74 | To deal with this problem, the :class:`OutputCapture` may be disabled 75 | and then re-enabled as follows: 76 | 77 | >>> with OutputCapture() as o: 78 | ... print('Foo') 79 | ... o.disable() 80 | ... print('Bar') 81 | ... o.enable() 82 | ... print('Baz') 83 | Bar 84 | >>> print(o.captured) 85 | Foo 86 | Baz 87 | 88 | 89 | .. 
note:: 90 | 91 | Some debuggers, notably :mod:`pdb`, do interesting things with streams 92 | such that calling :meth:`~OutputCapture.disable` from within the debugger 93 | will have no effect. A good fallback is to type the following, which will 94 | almost always restore output to where you want it: 95 | 96 | .. code-block:: python 97 | 98 | import sys; sys.stdout=sys.__stdout__ 99 | -------------------------------------------------------------------------------- /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2.1 2 | 3 | orbs: 4 | python: cjw296/python-ci@6 5 | 6 | jobs: 7 | check-package: 8 | parameters: 9 | image: 10 | type: string 11 | extra_package: 12 | type: string 13 | default: "" 14 | imports: 15 | type: string 16 | default: "testfixtures" 17 | docker: 18 | - image: << parameters.image >> 19 | steps: 20 | - python/check-package: 21 | package: "testfixtures" 22 | extra_packages: << parameters.extra_package >> 23 | typed: true 24 | test: 25 | - run: 26 | name: "Check Imports" 27 | command: python -c "import << parameters.imports >>" 28 | 29 | common: &common 30 | jobs: 31 | - python/pip-run-tests: 32 | matrix: 33 | parameters: 34 | image: &python_versions 35 | - cimg/python:3.11 36 | - cimg/python:3.12 37 | - cimg/python:3.13 38 | - python/pip-run-tests: 39 | name: mock-backport 40 | image: cimg/python:3.11 41 | extra_packages: "mock" 42 | - python/pip-run-tests: 43 | name: django-latest 44 | image: cimg/python:3.11 45 | extra_packages: "django" 46 | 47 | - python/typing: 48 | packages: testfixtures 49 | matrix: 50 | parameters: 51 | image: *python_versions 52 | 53 | - python/coverage: 54 | name: coverage 55 | image: cimg/python:3.11 56 | requires: 57 | - python/pip-run-tests 58 | - mock-backport 59 | - django-latest 60 | 61 | - python/pip-docs: 62 | name: docs 63 | requires: 64 | - coverage 65 | 66 | - python/pip-setuptools-build-package: 67 | name: package 68 | requires: 69 | - docs 70 | 71 | - check-package: 72 | name: check-package-python 73 | image: cimg/python:3.11 74 | requires: 75 | - package 76 | 77 | - check-package: 78 | name: check-package-python-mock 79 | image: cimg/python:3.11 80 | extra_package: mock 81 | imports: "testfixtures, testfixtures.mock" 82 | requires: 83 | - package 84 | 85 | - check-package: 86 | name: check-package-python-django 87 | image: cimg/python:3.11 88 | extra_package: django 89 | imports: "testfixtures, testfixtures.django" 90 | requires: 91 | - package 92 | 93 | - python/release: 94 | name: release 95 | config: .carthorse.yml 96 | requires: 97 | - python/typing 98 | - check-package-python 99 | - check-package-python-mock 100 | - check-package-python-django 101 | filters: 102 | branches: 103 | only: master 104 | 105 | workflows: 106 | push: 107 | <<: *common 108 | periodic: 109 | <<: *common 110 | triggers: 111 | - schedule: 112 | cron: "0 1 * * *" 113 | filters: 114 | branches: 115 | only: master 116 | -------------------------------------------------------------------------------- /testfixtures/tests/test_django/test_compare.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | 3 | import pytest 4 | from django.contrib.auth.models import User 5 | from testfixtures import OutputCapture, Replacer 6 | from .models import SampleModel 7 | from testfixtures.tests.test_django.manage import main 8 | 9 | from ..test_compare import CompareHelper 10 | from ... 
import compare 11 | from ...django import compare as django_compare 12 | 13 | 14 | class CompareTests(CompareHelper, TestCase): 15 | 16 | def test_simple_same(self): 17 | django_compare(SampleModel(id=1), SampleModel(id=1)) 18 | 19 | def test_simple_diff(self): 20 | self.check_raises( 21 | SampleModel(id=1), SampleModel(id=2), 22 | compare=django_compare, 23 | message=( 24 | 'SampleModel not as expected:\n' 25 | '\n' 26 | 'same:\n' 27 | "['value']\n" 28 | '\n' 29 | 'values differ:\n' 30 | "'id': 1 != 2" 31 | ) 32 | ) 33 | 34 | def test_simple_ignore_fields(self): 35 | django_compare(SampleModel(id=1), SampleModel(id=1), 36 | ignore_fields=['id']) 37 | 38 | def test_ignored_because_speshul(self): 39 | django_compare(SampleModel(not_editable=1), SampleModel(not_editable=2)) 40 | 41 | def test_ignored_because_no_longer_speshul(self): 42 | self.check_raises( 43 | SampleModel(not_editable=1), SampleModel(not_editable=2), 44 | compare=django_compare, 45 | message=( 46 | 'SampleModel not as expected:\n' 47 | '\n' 48 | 'same:\n' 49 | "['created', 'id', 'value']\n" 50 | '\n' 51 | 'values differ:\n' 52 | "'not_editable': 1 != 2" 53 | ), 54 | non_editable_fields=True 55 | ) 56 | 57 | def test_normal_compare_id_same(self): 58 | # other diffs ignored 59 | compare(SampleModel(id=1, value=1), SampleModel(id=1, value=2)) 60 | 61 | def test_normal_compare_id_diff(self): 62 | self.check_raises( 63 | SampleModel(id=3, value=1), SampleModel(id=4, value=2), 64 | compare=django_compare, 65 | message=( 66 | 'SampleModel not as expected:\n' 67 | '\n' 68 | 'values differ:\n' 69 | "'id': 3 != 4\n" 70 | "'value': 1 != 2" 71 | ) 72 | ) 73 | 74 | def test_manage(self): 75 | with OutputCapture() as output: 76 | with Replacer() as r: 77 | r.replace('os.environ.DJANGO_SETTINGS_MODULE', '', strict=False) 78 | r.replace('sys.argv', ['x', 'check']) 79 | main() 80 | output.compare('System check identified no issues (0 silenced).') 81 | 82 | @pytest.mark.django_db 83 | def test_many_to_many_same(self): 84 | user = User.objects.create(username='foo') 85 | django_compare(user, 86 | expected=User( 87 | username='foo', first_name='', last_name='', 88 | is_superuser=False 89 | ), 90 | ignore_fields=['id', 'date_joined']) 91 | -------------------------------------------------------------------------------- /testfixtures/shouldwarn.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | from types import TracebackType 3 | from typing import TypeAlias, Any 4 | 5 | from testfixtures import Comparison, SequenceComparison, compare 6 | 7 | 8 | WarningOrType: TypeAlias = Warning | type[Warning] 9 | 10 | 11 | class ShouldWarn(warnings.catch_warnings): 12 | """ 13 | This context manager is used to assert that warnings are issued 14 | within the context it is managing. 15 | 16 | :param expected: This should be a sequence made up of one or more elements, 17 | each of one of the following types: 18 | 19 | * A warning class, indicating that the type 20 | of the warnings is important but not the 21 | parameters it is created with. 22 | 23 | * A warning instance, indicating that a 24 | warning exactly matching the one supplied 25 | should have been issued. 26 | 27 | If no expected warnings are passed, you will need to inspect 28 | the contents of the list returned by the context manager. 29 | 30 | 31 | :param order_matters: 32 | 33 | A keyword-only parameter that controls whether the order of the 34 | captured entries is required to match those of the expected entries. 
35 | Defaults to ``True``. 36 | 37 | :param filters: 38 | If passed, these are used to create a filter such that only warnings you 39 | are interested in will be considered by this :class:`ShouldWarn` 40 | instance. The names and meanings are the same as the parameters for 41 | :func:`warnings.filterwarnings`. 42 | 43 | """ 44 | 45 | _empty_okay = False 46 | recorded: list[warnings.WarningMessage] 47 | 48 | def __init__( 49 | self, *expected: WarningOrType, order_matters: bool = True, **filters: Any 50 | ) -> None: 51 | super(ShouldWarn, self).__init__(record=True) 52 | self.order_matters = order_matters 53 | self.expected = [Comparison(e) for e in expected] 54 | self.filters = filters 55 | 56 | def __enter__(self) -> list[warnings.WarningMessage]: 57 | # We pass `record=True` above, so the following will *always* return a list: 58 | self.recorded = super(ShouldWarn, self).__enter__() # type: ignore[assignment] 59 | warnings.filterwarnings("always", **self.filters) 60 | return self.recorded 61 | 62 | def __exit__( 63 | self, 64 | exc_type: type[BaseException] | None, 65 | exc_val: BaseException | None, 66 | exc_tb: TracebackType | None, 67 | ) -> None: 68 | super(ShouldWarn, self).__exit__(exc_type, exc_val, exc_tb) 69 | if not self.recorded and self._empty_okay: 70 | return 71 | if not self.expected and self.recorded and not self._empty_okay: 72 | return 73 | compare( 74 | expected=SequenceComparison(*self.expected, ordered=self.order_matters), 75 | actual=[wm.message for wm in self.recorded] 76 | ) 77 | 78 | 79 | class ShouldNotWarn(ShouldWarn): 80 | """ 81 | This context manager is used to assert that no warnings are issued 82 | within the context it is managing. 83 | """ 84 | 85 | _empty_okay = True 86 | 87 | def __init__(self) -> None: 88 | super(ShouldNotWarn, self).__init__() 89 | -------------------------------------------------------------------------------- /testfixtures/django.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Sequence, Iterable 2 | 3 | from django.db.models import Model, Field 4 | 5 | from . import compare as base_compare 6 | from .comparison import ( 7 | _compare_mapping, 8 | register, 9 | CompareContext, 10 | unspecified, 11 | Comparers, 12 | ) 13 | 14 | 15 | def instance_fields(instance: Model) -> Iterable[Field]: 16 | opts = instance._meta 17 | for name in ( 18 | 'concrete_fields', 19 | 'virtual_fields', 20 | 'private_fields', 21 | ): 22 | fields = getattr(opts, name, None) 23 | if fields: 24 | for field in fields: 25 | yield field 26 | 27 | 28 | def model_to_dict( 29 | instance: Model, 30 | exclude: Sequence[str], 31 | include_not_editable: bool, 32 | ) -> dict[str, Any]: 33 | data = {} 34 | for f in instance_fields(instance): 35 | if f.name in exclude: 36 | continue 37 | if not getattr(f, 'editable', False) and not include_not_editable: 38 | continue 39 | data[f.name] = f.value_from_object(instance) 40 | return data 41 | 42 | 43 | def compare_model( 44 | x: Model, 45 | y: Model, 46 | context: CompareContext, 47 | ignore_fields: Sequence[str] = (), 48 | non_editable_fields: bool = False, 49 | ) -> str | None: 50 | """ 51 | Returns an informative string describing the differences between the two 52 | supplied Django model instances. The way in which this comparison is 53 | performed can be controlled using the following parameters: 54 | 55 | :param ignore_fields: 56 | A sequence of fields to ignore during comparison, most commonly 57 | set to ``['id']``. By default, no fields are ignored. 
58 | 59 | :param non_editable_fields: 60 | If `True`, then fields with ``editable=False`` will be included in the 61 | comparison. By default, these fields are ignored. 62 | """ 63 | args: Any = [] 64 | for obj in x, y: 65 | args.append(model_to_dict(obj, ignore_fields, non_editable_fields)) 66 | args.append(context) 67 | args.append(x) 68 | return _compare_mapping(*args) 69 | 70 | 71 | register(Model, compare_model) 72 | 73 | 74 | def compare( 75 | *args: Any, 76 | x: Any = unspecified, 77 | y: Any = unspecified, 78 | expected: Any = unspecified, 79 | actual: Any = unspecified, 80 | prefix: str | None = None, 81 | suffix: str | None = None, 82 | x_label: str | None = None, 83 | y_label: str | None = None, 84 | raises: bool = True, 85 | recursive: bool = True, 86 | strict: bool = False, 87 | ignore_eq: bool = True, 88 | comparers: Comparers | None = None, 89 | **options: Any 90 | ) -> str | None: 91 | """ 92 | This is identical to :func:`~testfixtures.compare`, but with ``ignore=True`` 93 | automatically set to make comparing django :class:`~django.db.models.Model` 94 | instances easier. 95 | """ 96 | return base_compare( 97 | *args, 98 | x=x, 99 | y=y, 100 | expected=expected, 101 | actual=actual, 102 | prefix=prefix, 103 | suffix=suffix, 104 | x_label=x_label, 105 | y_label=y_label, 106 | raises=raises, 107 | recursive=recursive, 108 | strict=strict, 109 | ignore_eq=ignore_eq, 110 | comparers=comparers, 111 | **options 112 | ) 113 | -------------------------------------------------------------------------------- /docs/warnings.txt: -------------------------------------------------------------------------------- 1 | Testing warnings 2 | ================ 3 | 4 | .. currentmodule:: testfixtures 5 | 6 | Testfixtures has tools that make it easy to make assertions about code that may emit warnings. 7 | 8 | The :class:`ShouldWarn` context manager 9 | --------------------------------------- 10 | 11 | This context manager allows you to assert that particular warnings are 12 | emitted in a block of code, for example: 13 | 14 | >>> from warnings import warn 15 | >>> from testfixtures import ShouldWarn 16 | >>> with ShouldWarn(UserWarning('you should fix that')): 17 | ... warn('you should fix that') 18 | 19 | If a warning issued doesn't match the one expected, 20 | :class:`ShouldWarn` will raise an :class:`AssertionError` 21 | causing the test in which it occurs to fail: 22 | 23 | >>> from warnings import warn 24 | >>> from testfixtures import ShouldWarn 25 | >>> with ShouldWarn(UserWarning('you should fix that')): 26 | ... warn("sorry dave, I can't let you do that") 27 | Traceback (most recent call last): 28 | ... 29 | AssertionError:... 30 | 31 | same: 32 | [] 33 | 34 | expected: 35 | [ 36 | 37 | attributes differ: 38 | 'args': ('you should fix that',) (Comparison) != ("sorry dave, I can't let you do that",) (actual) 39 | ] 40 | 41 | actual: 42 | [UserWarning("sorry dave, I can't let you do that"...)] 43 | (expected) != [UserWarning("sorry dave, I can't let you do that"...)] (actual) 44 | 45 | You can check multiple warnings in a particular piece of code: 46 | 47 | >>> from warnings import warn 48 | >>> from testfixtures import ShouldWarn 49 | >>> with ShouldWarn(UserWarning('you should fix that'), 50 | ... UserWarning('and that too')): 51 | ... warn('you should fix that') 52 | ... 
warn('and that too') 53 | 54 | If you don't care about the order of issued warnings, you can use ``order_matters=False``: 55 | 56 | >>> from warnings import warn 57 | >>> from testfixtures import ShouldWarn 58 | >>> with ShouldWarn(UserWarning('you should fix that'), 59 | ... UserWarning('and that too'), 60 | ... order_matters=False): 61 | ... warn('and that too') 62 | ... warn('you should fix that') 63 | 64 | If you want to inspect more details of the warnings issued, you can capture 65 | them into a list as follows: 66 | 67 | >>> from warnings import warn_explicit 68 | >>> from testfixtures import ShouldWarn 69 | >>> with ShouldWarn() as captured: 70 | ... warn_explicit(message='foo', category=DeprecationWarning, 71 | ... filename='bar.py', lineno=42) 72 | >>> len(captured) 73 | 1 74 | >>> captured[0].message 75 | DeprecationWarning('foo'...) 76 | >>> captured[0].lineno 77 | 42 78 | 79 | The :class:`ShouldNotWarn` context manager 80 | ------------------------------------------ 81 | 82 | If you do not expect any warnings to be logged in a piece of code, you can use 83 | the :class:`ShouldNotWarn` context manager. If any warnings are issued in the 84 | context it manages, it will raise an :class:`AssertionError` to indicate this: 85 | 86 | >>> from warnings import warn 87 | >>> from testfixtures import ShouldNotWarn 88 | >>> with ShouldNotWarn(): 89 | ... warn("woah dude") 90 | Traceback (most recent call last): 91 | ... 92 | AssertionError:... 93 | 94 | same: 95 | [] 96 | 97 | expected: 98 | [] 99 | 100 | actual: 101 | [UserWarning('woah dude'...)] 102 | (expected) != [UserWarning('woah dude'...)] (actual) 103 | -------------------------------------------------------------------------------- /testfixtures/utils.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from functools import wraps 3 | from textwrap import dedent 4 | 5 | from inspect import getfullargspec 6 | from types import TracebackType 7 | from typing import Callable, Sequence, Any, Generator, TypeVar, Generic, ParamSpec, TypeAlias 8 | 9 | from .mock import DEFAULT, _Sentinel 10 | 11 | 12 | def generator(*args: Any) -> Generator[Any, None, None]: 13 | """ 14 | A utility function for creating a generator that will yield the 15 | supplied arguments. 16 | """ 17 | for i in args: 18 | yield i 19 | 20 | T = TypeVar("T") 21 | 22 | 23 | class Wrapping(Generic[T]): 24 | 25 | attribute_name = None 26 | new = DEFAULT 27 | 28 | def __init__(self, before: Callable[[], T], after: Callable[[], None] | None): 29 | self.before, self.after = before, after 30 | 31 | def __enter__(self) -> T: 32 | return self.before() 33 | 34 | def __exit__( 35 | self, 36 | exc_type: type[BaseException] | None, 37 | exc_val: BaseException | None, 38 | exc_tb: TracebackType | None, 39 | ) -> None: 40 | if self.after is not None: 41 | self.after() 42 | 43 | ExcInfo: TypeAlias = tuple[type[BaseException] | None, BaseException | None, TracebackType | None] 44 | P = ParamSpec("P") 45 | U = TypeVar("U") 46 | 47 | 48 | def wrap( 49 | before: Callable[[], T], after: Callable[[], None] | None = None 50 | ) -> Callable[[Callable[P, U]], Callable[P, U]]: 51 | """ 52 | A decorator that causes the supplied callables to be called before 53 | or after the wrapped callable, as appropriate. 
54 | """ 55 | 56 | wrapping = Wrapping(before, after) 57 | 58 | def wrapper(func: Callable[P, U]) -> Callable[P, U]: 59 | if hasattr(func, 'patchings'): 60 | func.patchings.append(wrapping) 61 | return func 62 | 63 | @wraps(func) 64 | def patched(*args: P.args, **keywargs: P.kwargs) -> U: 65 | extra_args = [] 66 | entered_patchers = [] 67 | 68 | to_add = len(getfullargspec(func).args[len(args):]) 69 | added = 0 70 | 71 | exc_info: ExcInfo = (None, None, None) 72 | try: 73 | for patching in patched.patchings: # type: ignore[attr-defined] 74 | arg = patching.__enter__() 75 | entered_patchers.append(patching) 76 | if patching.attribute_name is not None: 77 | keywargs.update(arg) 78 | elif patching.new is DEFAULT and added < to_add: 79 | extra_args.append(arg) 80 | added += 1 81 | 82 | args += tuple(extra_args) # type: ignore[assignment] 83 | return func(*args, **keywargs) 84 | except: 85 | # Pass the exception to __exit__ 86 | exc_info = sys.exc_info() 87 | # re-raise the exception 88 | raise 89 | finally: 90 | for patching in reversed(entered_patchers): 91 | patching.__exit__(*exc_info) 92 | 93 | patched.patchings = [wrapping] # type: ignore[attr-defined] 94 | return patched 95 | 96 | return wrapper 97 | 98 | 99 | def extend_docstring(docstring: str, objs: Sequence) -> None: 100 | for obj in objs: 101 | obj.__doc__ = dedent(obj.__doc__) + docstring 102 | 103 | 104 | def indent(text: str, indent_size: int = 2) -> str: 105 | indented = [] 106 | for do_indent, line in enumerate(text.splitlines(True)): 107 | if do_indent: 108 | line = ' '*indent_size + line 109 | indented.append(line) 110 | return ''.join(indented) 111 | -------------------------------------------------------------------------------- /testfixtures/tests/test_tempdir.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | 4 | from testfixtures.shouldraise import ShouldAssert 5 | from testfixtures.mock import Mock 6 | from tempfile import mkdtemp 7 | from testfixtures import Replacer, TempDirectory, compare, tempdir 8 | from unittest import TestCase 9 | 10 | from ..rmtree import rmtree 11 | 12 | 13 | class TestTempDir(TestCase): 14 | 15 | @tempdir() 16 | def test_simple(self, d): 17 | d.write('something', b'stuff') 18 | d.write('.svn', b'stuff') 19 | d.compare(( 20 | '.svn', 21 | 'something', 22 | )) 23 | 24 | @tempdir() 25 | def test_subdirs(self, d): 26 | subdir = ['some', 'thing'] 27 | d.write(subdir+['something'], b'stuff') 28 | d.write(subdir+['.svn'], b'stuff') 29 | d.compare(path=subdir, expected=( 30 | '.svn', 31 | 'something', 32 | )) 33 | 34 | @tempdir() 35 | def test_not_same(self, d): 36 | d.write('something', b'stuff') 37 | 38 | with ShouldAssert( 39 | "sequence not as expected:\n" 40 | "\n" 41 | "same:\n" 42 | "()\n" 43 | "\n" 44 | "expected:\n" 45 | "('.svn', 'something')\n" 46 | "\n" 47 | "actual:\n" 48 | "('something',)" 49 | ): 50 | d.compare(['.svn', 'something']) 51 | 52 | @tempdir(ignore=('.svn', )) 53 | def test_ignore(self, d): 54 | d.write('something', b'stuff') 55 | d.write('.svn', b'stuff') 56 | d.compare(['something']) 57 | 58 | def test_cleanup_properly(self): 59 | r = Replacer() 60 | try: 61 | m = Mock() 62 | d = mkdtemp() 63 | m.return_value = d 64 | r.replace('testfixtures.tempdirectory.mkdtemp', m) 65 | 66 | self.assertTrue(os.path.exists(d)) 67 | 68 | self.assertFalse(m.called) 69 | 70 | @tempdir() 71 | def test_method(d): 72 | d.write('something', b'stuff') 73 | d.compare(['something']) 74 | 75 | self.assertFalse(m.called) 
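            # test_method has only been defined, not called, so the real temporary
            # directory created by mkdtemp() above should still be empty here: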
76 | compare(os.listdir(d), []) 77 | 78 | test_method() 79 | 80 | self.assertTrue(m.called) 81 | self.assertFalse(os.path.exists(d)) 82 | 83 | finally: 84 | r.restore() 85 | if os.path.exists(d): 86 | # only runs if the test fails! 87 | rmtree(d) # pragma: no cover 88 | 89 | @tempdir() 90 | def test_cleanup_test_okay_with_deleted_dir(self, d): 91 | rmtree(d.path) 92 | 93 | @tempdir() 94 | def test_decorator_returns_tempdirectory(self, d): 95 | # check for what we get, so we only have to write 96 | # tests in test_tempdirectory.py 97 | self.assertTrue(isinstance(d, TempDirectory)) 98 | 99 | def test_dont_create_or_cleanup_with_path(self): 100 | with Replacer() as r: 101 | m = Mock() 102 | r.replace('testfixtures.tempdirectory.mkdtemp', m) 103 | r.replace('testfixtures.tempdirectory.rmtree', m) 104 | 105 | @tempdir(path='foo') 106 | def test_method(d): 107 | compare(d.path, 'foo') 108 | 109 | test_method() 110 | 111 | self.assertFalse(m.called) 112 | 113 | def test_cwd_directory(self): 114 | @tempdir(cwd=True) 115 | def test_method(d): 116 | compare(Path(os.getcwd()).resolve(), expected=Path(d.path).resolve()) 117 | 118 | original = os.getcwd() 119 | try: 120 | test_method() 121 | compare(Path(os.getcwd()).resolve(), expected=Path(original).resolve()) 122 | finally: 123 | os.chdir(original) 124 | -------------------------------------------------------------------------------- /AGENTS.md: -------------------------------------------------------------------------------- 1 | This file provides guidance to LLM tools such as [aider](https://aider.chat/) 2 | and [Claude Code](claude.ai/code) when working with code in this repository. 3 | 4 | ## Project Overview 5 | 6 | Testfixtures is a Python testing utilities library that provides helpers and mock objects for automated testing. It includes tools for comparing objects, mocking, logging capture, stream output testing, file/directory testing, exception/warning testing, Django support, and Twisted support. 7 | 8 | ## Development Commands 9 | 10 | ### Environment 11 | 12 | Always work in a virtualenv contained in `.venv` in the checkout: 13 | 14 | ```bash 15 | source .venv/bin/activate 16 | ``` 17 | 18 | If the environment doesn't exist, create it as follows: 19 | 20 | ```bash 21 | python3 -m venv .venv 22 | source .venv/bin/activate 23 | pip install -U pip setuptools 24 | pip install -U -e .[test,build,docs] 25 | ``` 26 | 27 | ### Running Tests 28 | ```bash 29 | pytest # Run all tests 30 | pytest testfixtures/tests/test_comparison.py # Run specific test file 31 | ``` 32 | 33 | ### Type Checking 34 | ```bash 35 | mypy testfixtures/ # Run type checking with mypy 36 | ``` 37 | **CRITICAL**: Always run mypy after code changes. All type checks must pass. 38 | 39 | ### Coverage 40 | ```bash 41 | pytest --cov=testfixtures --cov-report=term-missing # Run with coverage 42 | ``` 43 | **CRITICAL**: Always run coverage after code changes. Coverage must not drop below baseline. 44 | 45 | ### Documentation 46 | ```bash 47 | cd docs && make html # Build HTML documentation 48 | cd docs && make clean # Clean documentation build 49 | ``` 50 | 51 | ### Package Installation 52 | ```bash 53 | pip install -e .[test,build] # Install in development mode with test dependencies 54 | ``` 55 | 56 | ## Architecture 57 | 58 | ### Core Components 59 | 60 | - **comparison.py**: Core comparison functionality including `compare()`, `diff()`, and various comparison classes (`Comparison`, `StringComparison`, `RoundComparison`, etc.) 
61 | - **replace.py**: Mocking and replacement functionality via `Replacer` class and `replace()` decorators 62 | - **logcapture.py**: Logging capture via `LogCapture` class for testing logged output 63 | - **datetime.py**: Date/time mocking utilities (`mock_datetime`, `mock_date`, `mock_time`) 64 | - **tempdirectory.py**: Temporary directory management for file system testing 65 | - **shouldraise.py**: Exception testing utilities (`ShouldRaise`, `should_raise`) 66 | - **shouldwarn.py**: Warning testing utilities (`ShouldWarn`, `ShouldNotWarn`) 67 | 68 | ### Key Features 69 | 70 | 1. **Object Comparison**: Enhanced comparison with detailed diff output for complex nested structures 71 | 2. **Mocking System**: Comprehensive replacement/mocking system for objects, methods, and modules 72 | 3. **Logging Testing**: Capture and assert on logging output 73 | 4. **File System Testing**: Temporary directories and file operations 74 | 5. **Time Mocking**: Mock datetime, date, and time objects 75 | 6. **Exception/Warning Testing**: Assert on raised exceptions and warnings 76 | 77 | ### Module Organization 78 | 79 | - Main API exports are in `__init__.py` 80 | - Each major feature has its own module (comparison, replace, logcapture, etc.) 81 | - Tests are in `testfixtures/tests/` with comprehensive coverage 82 | - Django-specific functionality in `django.py` 83 | - Twisted-specific functionality in `twisted.py` 84 | 85 | ## Configuration 86 | 87 | - **pytest.ini**: Test configuration with Django settings module 88 | - **mypy.ini**: Type checking configuration with Django and Zope plugins 89 | - **setup.py**: Package configuration with optional dependencies for Django, Sybil, and Twisted 90 | 91 | ## Testing Notes 92 | 93 | - Tests use pytest framework 94 | - Django tests require `DJANGO_SETTINGS_MODULE=testfixtures.tests.test_django.settings` 95 | - Type checking excludes some modules (`testfixtures.datetime`, test modules) 96 | - Supports Python 3.11+ 97 | -------------------------------------------------------------------------------- /docs/utilities.txt: -------------------------------------------------------------------------------- 1 | Utilities 2 | ========= 3 | 4 | .. currentmodule:: testfixtures 5 | 6 | This section describes a few handy functions that didn't fit nicely in 7 | any other section. 8 | 9 | .. _generator: 10 | 11 | The ``generator`` helper 12 | ------------------------ 13 | 14 | It can be handy when testing to be able to turn a simple sequence into 15 | a generator. This can be necessary when you want to check that your 16 | code will behave correctly when processing a generator instead of a 17 | simple sequence, or when you're looking to make assertions about the 18 | expected return value of a callable that returns a generator. 19 | 20 | If you need to turn a simple sequence into a generator, the 21 | :func:`generator` function is the way to do it: 22 | 23 | >>> from testfixtures import generator 24 | >>> generator(1,2,3) 25 | 26 | 27 | Iterating over this generator will return the arguments passed to the 28 | :func:`generator` function: 29 | 30 | >>> for i in _: 31 | ... print(i, end=' ') 32 | 1 2 3 33 | 34 | The ``wrap`` helper 35 | ------------------- 36 | 37 | The :func:`wrap` helper is a decorator function that allows you to 38 | wrap the call to the decorated callable with calls to other 39 | callables. This can be useful when you want to perform setup and 40 | teardown actions either side of a test function. 
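
In practice, the ``before`` and ``after`` callables often manage a resource
around the test. Here is a minimal sketch, assuming the helper names below are
purely illustrative and not part of testfixtures:

.. code-block:: python

    import os
    import shutil
    import tempfile

    from testfixtures import wrap

    _scratch = []

    def make_scratch_dir():
        # runs before the test; its return value is passed to the test
        path = tempfile.mkdtemp()
        _scratch.append(path)
        return path

    def remove_scratch_dir():
        # runs after the test, even if the test raises
        shutil.rmtree(_scratch.pop())

    @wrap(make_scratch_dir, remove_scratch_dir)
    def test_writes_a_file(path):
        with open(os.path.join(path, 'example.txt'), 'w') as f:
            f.write('hello')

Since the ``after`` callable receives no arguments, the sketch above stashes
the path in a module-level list so the teardown can find it. The rest of this
section walks through the mechanics using simple printing functions.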
41 | 42 | For example, take the following functions: 43 | 44 | .. code-block:: python 45 | 46 | def before(): 47 | print("before") 48 | 49 | def after(): 50 | print("after") 51 | 52 | The :func:`wrap` helper can be used to wrap a function with these: 53 | 54 | .. code-block:: python 55 | 56 | from testfixtures import wrap 57 | 58 | @wrap(before, after) 59 | def a_function(): 60 | print("a_function") 61 | 62 | When the wrapped function is executed, the output is as follows: 63 | 64 | >>> a_function() 65 | before 66 | a_function 67 | after 68 | 69 | The section argument to :func:`wrap` is optional: 70 | 71 | .. code-block:: python 72 | 73 | from testfixtures import wrap 74 | 75 | @wrap(before) 76 | def a_function(): 77 | print("a_function") 78 | 79 | Now, the wrapped function gives the following output when executed: 80 | 81 | >>> a_function() 82 | before 83 | a_function 84 | 85 | Multiple wrapping functions can be provided by stacking :func:`wrap` 86 | decorations: 87 | 88 | .. code-block:: python 89 | 90 | def before1(): 91 | print("before 1") 92 | 93 | def after1(): 94 | print("after 1") 95 | 96 | def before2(): 97 | print("before 2") 98 | 99 | def after2(): 100 | print("after 2") 101 | 102 | @wrap(before2, after2) 103 | @wrap(before1, after1) 104 | def a_function(): 105 | print("a_function") 106 | 107 | The order of execution is illustrated below: 108 | 109 | >>> a_function() 110 | before 1 111 | before 2 112 | a_function 113 | after 2 114 | after 1 115 | 116 | The results of calling the wrapping functions executed before the 117 | wrapped function can be made available to the wrapped function 118 | provided it accepts positional arguments for these results: 119 | 120 | .. code-block:: python 121 | 122 | def before1(): 123 | return "return 1" 124 | 125 | def before2(): 126 | return "return 2" 127 | 128 | @wrap(before2) 129 | @wrap(before1) 130 | def a_function(r1, r2): 131 | print(r1) 132 | print(r2) 133 | 134 | Calling the wrapped function illustrates the behaviour: 135 | 136 | >>> a_function() 137 | return 1 138 | return 2 139 | 140 | Finally, the return value of the wrapped function will always be that 141 | of the original function: 142 | 143 | .. 
code-block:: python 144 | 145 | def before1(): 146 | return 1 147 | 148 | def after1(): 149 | return 2 150 | 151 | def before2(): 152 | return 3 153 | 154 | def after2(): 155 | return 4 156 | 157 | @wrap(before2, after2) 158 | @wrap(before1, after2) 159 | def a_function(): 160 | return 'original' 161 | 162 | When the above wrapped function is executed, the original return value 163 | is still returned: 164 | 165 | >>> a_function() 166 | 'original' 167 | -------------------------------------------------------------------------------- /testfixtures/tests/test_replacer.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | 3 | from testfixtures import Replacer, ShouldRaise 4 | 5 | 6 | class TestReplacer(TestCase): 7 | 8 | def test_function(self): 9 | from testfixtures.tests import sample1 10 | assert sample1.z() == 'original z' 11 | 12 | def test_z(): 13 | return 'replacement z' 14 | 15 | r = Replacer() 16 | r.replace('testfixtures.tests.sample1.z',test_z) 17 | 18 | assert sample1.z() == 'replacement z' 19 | 20 | r.restore() 21 | 22 | assert sample1.z() == 'original z' 23 | 24 | def test_class(self): 25 | from testfixtures.tests import sample1 26 | x = sample1.X() 27 | assert x.__class__.__name__ == 'X' 28 | 29 | class XReplacement(sample1.X): pass 30 | 31 | r = Replacer() 32 | r.replace('testfixtures.tests.sample1.X', XReplacement) 33 | 34 | x = sample1.X() 35 | assert x.__class__.__name__ == 'XReplacement' 36 | assert sample1.X().y() == 'original y' 37 | 38 | r.restore() 39 | 40 | x = sample1.X() 41 | assert x.__class__.__name__ == 'X' 42 | 43 | def test_method(self): 44 | from testfixtures.tests import sample1 45 | assert sample1.X().y() == 'original y' 46 | 47 | def test_y(self): 48 | return 'replacement y' 49 | 50 | r = Replacer() 51 | r.replace('testfixtures.tests.sample1.X.y',test_y) 52 | 53 | assert sample1.X().y()[:38] == 'replacement y' 54 | 55 | r.restore() 56 | 57 | assert sample1.X().y() == 'original y' 58 | 59 | def test_class_method(self): 60 | from testfixtures.tests import sample1 61 | c = sample1.X 62 | assert sample1.X.aMethod() is c 63 | 64 | def rMethod(cls): 65 | return cls, 1 66 | 67 | r = Replacer() 68 | r.replace('testfixtures.tests.sample1.X.aMethod',rMethod) 69 | 70 | sample1.X.aMethod() 71 | assert sample1.X.aMethod() == (c, 1) 72 | 73 | r.restore() 74 | 75 | sample1.X.aMethod() 76 | assert sample1.X.aMethod() is c 77 | 78 | def test_multiple_replace(self): 79 | from testfixtures.tests import sample1 80 | assert sample1.z() == 'original z' 81 | assert sample1.X().y() == 'original y' 82 | 83 | def test_y(self): 84 | return self.__class__.__name__ 85 | def test_z(): 86 | return 'replacement z' 87 | 88 | r = Replacer() 89 | r.replace('testfixtures.tests.sample1.z',test_z) 90 | r.replace('testfixtures.tests.sample1.X.y',test_y) 91 | 92 | assert sample1.z() == 'replacement z' 93 | assert sample1.X().y() == 'X' 94 | 95 | r.restore() 96 | 97 | assert sample1.z() == 'original z' 98 | assert sample1.X().y() == 'original y' 99 | 100 | def test_gotcha(self): 101 | # Just because you replace an object in one context: 102 | 103 | from testfixtures.tests import sample1 104 | from testfixtures.tests import sample2 105 | assert sample1.z() == 'original z' 106 | 107 | def test_z(): 108 | return 'replacement z' 109 | 110 | r = Replacer() 111 | r.replace('testfixtures.tests.sample1.z',test_z) 112 | 113 | assert sample1.z() == 'replacement z' 114 | 115 | # Doesn't meant that it's replaced in all contexts: 116 | 117 | 
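        # Replacer only patched the attribute on the sample1 module, so
        # whatever z sample2 refers to is left untouched: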
assert sample2.z() == 'original z' 118 | 119 | r.restore() 120 | 121 | def test_remove_called_twice(self): 122 | from testfixtures.tests import sample1 123 | 124 | def test_z(): pass 125 | 126 | r = Replacer() 127 | r.replace('testfixtures.tests.sample1.z',test_z) 128 | 129 | r.restore() 130 | assert sample1.z() == 'original z' 131 | 132 | r.restore() 133 | assert sample1.z() == 'original z' 134 | 135 | def test_with_statement(self): 136 | from testfixtures.tests import sample1 137 | assert sample1.z() == 'original z' 138 | 139 | def test_z(): 140 | return 'replacement z' 141 | 142 | with Replacer() as r: 143 | r.replace('testfixtures.tests.sample1.z',test_z) 144 | assert sample1.z() == 'replacement z' 145 | 146 | assert sample1.z() == 'original z' 147 | 148 | def test_not_there(self): 149 | def test_bad(): pass 150 | 151 | with Replacer() as r: 152 | with ShouldRaise(AttributeError("Original 'bad' not found")): 153 | r.replace('testfixtures.tests.sample1.bad', test_bad) 154 | -------------------------------------------------------------------------------- /testfixtures/tests/test_compare_typed.py: -------------------------------------------------------------------------------- 1 | # Tests that ensure compare and Comparison work correctly in strictly type checked environments 2 | from dataclasses import dataclass 3 | 4 | from testfixtures import compare, Comparison 5 | from testfixtures.comparison import like, sequence, contains, unordered 6 | 7 | 8 | @dataclass 9 | class SampleClass: 10 | x: int 11 | y: str 12 | 13 | 14 | class OtherClass: 15 | pass 16 | 17 | 18 | @dataclass 19 | class ListCollection: 20 | items: list[SampleClass] 21 | 22 | 23 | @dataclass 24 | class TupleCollection: 25 | items: tuple[SampleClass, ...] 26 | 27 | 28 | def test_simple_compare() -> None: 29 | compare(SampleClass(1, '2'), expected=SampleClass(1, '2')) 30 | 31 | 32 | def test_comparison_bad_typing_in_list() -> None: 33 | expected: list[SampleClass] = [ 34 | Comparison(SampleClass, x=1, partial=True) # type: ignore[list-item] 35 | ] 36 | compare(expected, actual=[SampleClass(1, '2')]) 37 | 38 | 39 | def test_comparison_via_like_in_list() -> None: 40 | expected: list[SampleClass] = [like(SampleClass, x=1)] 41 | compare(expected, actual=[SampleClass(1, '2')]) 42 | 43 | 44 | def test_comparison_via_like_in_assert() -> None: 45 | expected: SampleClass = like(SampleClass) 46 | assert expected == SampleClass(1, '2') 47 | assert expected == SampleClass(3, '4') 48 | assert expected != SampleClass 49 | assert expected != OtherClass() 50 | 51 | 52 | class TestSequence: 53 | def test_minimal(self) -> None: 54 | actual = ListCollection([SampleClass(1, '2')]) 55 | compare(actual, expected=ListCollection(sequence()([SampleClass(1, '2')]))) 56 | 57 | def test_maximal(self) -> None: 58 | actual = ListCollection( 59 | [SampleClass(1, 'x'), SampleClass(2, 'x'), SampleClass(3, 'x')] 60 | ) 61 | compare( 62 | actual, 63 | expected=ListCollection( 64 | sequence(partial=True, ordered=False, recursive=False)( 65 | [ 66 | SampleClass(3, 'x'), 67 | SampleClass(2, 'x'), 68 | ], 69 | ) 70 | ), 71 | ) 72 | 73 | def test_minimal_type_override(self) -> None: 74 | actual = TupleCollection((SampleClass(1, '2'),)) 75 | compare( 76 | actual, 77 | expected=TupleCollection( 78 | sequence(returns=tuple[SampleClass, ...])([SampleClass(1, '2')]) 79 | ), 80 | ) 81 | 82 | def test_maximal_type_override(self) -> None: 83 | actual = TupleCollection( 84 | ( 85 | SampleClass(1, 'x'), 86 | SampleClass(2, 'x'), 87 | SampleClass(3, 'x'), 88 | ) 89 | ) 90 | 
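        # partial, unordered comparison; `returns` is there so the expected
        # value type-checks as the tuple that TupleCollection.items requires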
compare( 91 | actual, 92 | expected=TupleCollection( 93 | sequence( 94 | partial=True, 95 | ordered=False, 96 | recursive=False, 97 | returns=tuple[SampleClass, ...], 98 | )( 99 | [ 100 | SampleClass(3, 'x'), 101 | SampleClass(2, 'x'), 102 | ] 103 | ) 104 | ), 105 | ) 106 | 107 | 108 | class TestContains: 109 | def test_it(self) -> None: 110 | actual = ListCollection([SampleClass(1, "2"), SampleClass(3, "4")]) 111 | compare(actual, expected=ListCollection(contains([SampleClass(1, "2")]))) 112 | 113 | def test_type_override(self) -> None: 114 | actual = TupleCollection((SampleClass(1, "2"), SampleClass(3, "4"))) 115 | compare( 116 | actual, 117 | expected=TupleCollection( 118 | contains([SampleClass(1, "2")], returns=tuple[SampleClass, ...]) 119 | ), 120 | ) 121 | 122 | 123 | class TestUnordered: 124 | def test_it(self) -> None: 125 | actual = ListCollection([SampleClass(2, "x"), SampleClass(1, "x")]) 126 | compare( 127 | actual, 128 | expected=ListCollection( 129 | unordered([SampleClass(1, "x"), SampleClass(2, "x")]) 130 | ), 131 | ) 132 | 133 | def test_type_override(self) -> None: 134 | actual = TupleCollection((SampleClass(2, "x"), SampleClass(1, "x"))) 135 | compare( 136 | actual, 137 | expected=TupleCollection( 138 | unordered( 139 | [SampleClass(1, "x"), SampleClass(2, "x")], 140 | returns=tuple[SampleClass, ...], 141 | ) 142 | ), 143 | ) 144 | -------------------------------------------------------------------------------- /docs/api.txt: -------------------------------------------------------------------------------- 1 | API Reference 2 | ============= 3 | 4 | .. currentmodule:: testfixtures 5 | 6 | 7 | Comparisons 8 | ----------- 9 | 10 | .. autofunction:: compare 11 | 12 | .. autoclass:: Comparison 13 | 14 | .. autofunction:: testfixtures.like 15 | 16 | .. autoclass:: MappingComparison 17 | :members: 18 | 19 | .. autoclass:: Permutation 20 | :members: 21 | 22 | .. autoclass:: RoundComparison 23 | :members: 24 | 25 | .. autoclass:: RangeComparison 26 | :members: 27 | 28 | .. autoclass:: SequenceComparison 29 | :members: 30 | 31 | .. autofunction:: testfixtures.sequence 32 | 33 | .. autofunction:: testfixtures.contains 34 | 35 | .. autofunction:: testfixtures.unordered 36 | 37 | .. autoclass:: Subset 38 | :members: 39 | 40 | .. autoclass:: StringComparison 41 | :members: 42 | 43 | 44 | testfixtures.comparison 45 | ~~~~~~~~~~~~~~~~~~~~~~~ 46 | 47 | .. automodule:: testfixtures.comparison 48 | 49 | .. autofunction:: testfixtures.comparison.register 50 | 51 | .. autofunction:: testfixtures.comparison.compare_simple 52 | 53 | .. autofunction:: testfixtures.comparison.compare_object 54 | 55 | .. autofunction:: testfixtures.comparison.merge_ignored_attributes 56 | 57 | .. autofunction:: testfixtures.comparison.compare_exception 58 | 59 | .. autofunction:: testfixtures.comparison.compare_exception_group 60 | 61 | .. autofunction:: testfixtures.comparison.compare_with_type 62 | 63 | .. autofunction:: testfixtures.comparison.compare_sequence 64 | 65 | .. autofunction:: testfixtures.comparison.compare_generator 66 | 67 | .. autofunction:: testfixtures.comparison.compare_tuple 68 | 69 | .. autofunction:: testfixtures.comparison.compare_dict 70 | 71 | .. autofunction:: testfixtures.comparison.compare_set 72 | 73 | .. autofunction:: testfixtures.comparison.compare_text 74 | 75 | .. autoclass:: testfixtures.comparison.CompareContext 76 | 77 | .. currentmodule:: testfixtures 78 | 79 | Capturing 80 | --------- 81 | 82 | .. autoclass:: LogCapture 83 | :members: 84 | 85 | .. 
autofunction:: log_capture 86 | 87 | .. autoclass:: OutputCapture 88 | :members: 89 | 90 | Mocking 91 | ------- 92 | .. autoclass:: Replace 93 | :members: 94 | 95 | .. autofunction:: replace_in_environ 96 | 97 | .. autofunction:: replace_on_class 98 | 99 | .. autofunction:: replace_in_module 100 | 101 | .. autoclass:: Replacer 102 | :members: 103 | :special-members: __call__ 104 | 105 | .. autofunction:: replace 106 | 107 | .. autofunction:: mock_date(year=2001, month=1, day=1, delta=None, delta_type='days', strict=False) 108 | 109 | .. autoclass:: testfixtures.datetime.MockDate 110 | :members: 111 | :member-order: bysource 112 | 113 | .. autofunction:: mock_datetime(year=2001, month=1, day=1, hour=0, minute=0, second=0, microsecond=0, tzinfo=None, delta=None, delta_type='seconds', date_type=datetime.date, strict=False) 114 | 115 | .. autoclass:: testfixtures.datetime.MockDateTime 116 | :members: 117 | :member-order: bysource 118 | 119 | .. autofunction:: mock_time(year=2001, month=1, day=1, hour=0, minute=0, second=0, microsecond=0, tzinfo=None, delta=None, delta_type='seconds') 120 | 121 | .. autoclass:: testfixtures.datetime.MockTime 122 | :members: 123 | :special-members: __new__ 124 | :member-order: bysource 125 | 126 | testfixtures.mock 127 | ~~~~~~~~~~~~~~~~~ 128 | 129 | .. automodule:: testfixtures.mock 130 | 131 | 132 | testfixtures.popen 133 | ~~~~~~~~~~~~~~~~~~ 134 | 135 | .. automodule:: testfixtures.popen 136 | :members: 137 | 138 | .. currentmodule:: testfixtures 139 | 140 | Assertions 141 | ---------- 142 | 143 | .. autoclass:: testfixtures.shouldraise.NoException 144 | 145 | .. autoclass:: ShouldRaise 146 | :members: 147 | 148 | .. autoclass:: should_raise 149 | 150 | .. autofunction:: ShouldAssert 151 | 152 | .. autoclass:: ShouldWarn 153 | :members: 154 | 155 | .. autoclass:: ShouldNotWarn 156 | :members: 157 | 158 | 159 | Resources 160 | --------- 161 | 162 | .. autoclass:: TempDirectory 163 | :members: 164 | 165 | .. autofunction:: tempdir 166 | 167 | .. autofunction:: generator 168 | 169 | 170 | Helpers and Constants 171 | --------------------- 172 | 173 | .. autofunction:: diff 174 | 175 | .. autofunction:: wrap 176 | 177 | .. data:: not_there 178 | 179 | A singleton used to represent the absence of a particular attribute or parameter. 180 | 181 | 182 | Framework Helpers 183 | ----------------- 184 | 185 | Framework-specific helpers provided by testfixtures. 186 | 187 | testfixtures.django 188 | ~~~~~~~~~~~~~~~~~~~ 189 | 190 | .. automodule:: testfixtures.django 191 | :members: 192 | 193 | testfixtures.sybil 194 | ~~~~~~~~~~~~~~~~~~ 195 | 196 | 197 | .. autoclass:: testfixtures.sybil.FileParser 198 | :member-order: bysource 199 | :members: 200 | 201 | 202 | testfixtures.twisted 203 | ~~~~~~~~~~~~~~~~~~~~ 204 | 205 | .. 
automodule:: testfixtures.twisted 206 | :member-order: bysource 207 | :members: 208 | -------------------------------------------------------------------------------- /testfixtures/tests/test_sybil.py: -------------------------------------------------------------------------------- 1 | from textwrap import dedent 2 | from unittest import TestCase 3 | 4 | from testfixtures.mock import Mock 5 | from sybil.document import Document 6 | 7 | from testfixtures import compare, Comparison as C, TempDirectory 8 | from testfixtures.sybil import FileParser, FileBlock 9 | 10 | 11 | class TestFileParser(TestCase): 12 | 13 | def check_document(self, text, expected): 14 | d = Document(dedent(text), path='/dev/null') 15 | compare( 16 | list(r.parsed for r in FileParser('td')(d)), 17 | expected=expected 18 | ) 19 | 20 | def test_multiple_files(self): 21 | self.check_document( 22 | text=""" 23 | 24 | .. topic:: file.txt 25 | :class: write-file 26 | 27 | line 1 28 | 29 | line 2 30 | line 3 31 | 32 | .. topic:: file2.txt 33 | :class: read-file 34 | 35 | 36 | line 4 37 | 38 | line 5 39 | line 6 40 | 41 | """, 42 | expected = [ 43 | C(FileBlock, 44 | path='file.txt', 45 | content="line 1\n\nline 2\nline 3\n", 46 | action='write'), 47 | C(FileBlock, 48 | path='file2.txt', 49 | content='line 4\n\nline 5\nline 6\n', 50 | action='read'), 51 | ]) 52 | 53 | def test_ignore_literal_blocking(self): 54 | self.check_document( 55 | text=""" 56 | .. topic:: file.txt 57 | :class: write-file 58 | 59 | :: 60 | 61 | line 1 62 | 63 | line 2 64 | line 3 65 | """, 66 | expected=[ 67 | C(FileBlock, 68 | path='file.txt', 69 | content="line 1\n\nline 2\nline 3\n", 70 | action='write'), 71 | ]) 72 | 73 | def test_file_followed_by_text(self): 74 | self.check_document( 75 | text=""" 76 | 77 | .. topic:: file.txt 78 | :class: write-file 79 | 80 | print("hello") 81 | out = 'there' 82 | 83 | foo = 'bar' 84 | 85 | This is just some normal text! 86 | """, 87 | expected=[ 88 | C(FileBlock, 89 | path='file.txt', 90 | content='print("hello")' 91 | '\nout = \'there\'\n\nfoo = \'bar\'\n', 92 | action='write'), 93 | ]) 94 | 95 | def test_red_herring(self): 96 | self.check_document( 97 | text=""" 98 | .. topic:: file.txt 99 | :class: not-a-file 100 | 101 | print "hello" 102 | out = 'there' 103 | 104 | """, 105 | expected=[] 106 | ) 107 | 108 | def test_no_class(self): 109 | self.check_document( 110 | text=""" 111 | .. 
topic:: file.txt 112 | 113 | print "hello" 114 | out = 'there' 115 | 116 | """, 117 | expected=[] 118 | ) 119 | 120 | def check_evaluate(self, dir, block, expected): 121 | parser = FileParser('td') 122 | compare(expected, actual=parser.evaluate(Mock( 123 | parsed=block, 124 | namespace={'td': dir}, 125 | path='/the/file', 126 | line=42, 127 | ))) 128 | 129 | def test_evaluate_read_same(self): 130 | with TempDirectory() as dir: 131 | dir.write('foo', b'content') 132 | self.check_evaluate( 133 | dir, 134 | FileBlock('foo', 'content', 'read'), 135 | expected=None 136 | ) 137 | 138 | def test_evaluate_read_difference(self): 139 | with TempDirectory() as dir: 140 | dir.write('foo', b'actual') 141 | self.check_evaluate( 142 | dir, 143 | FileBlock('foo', 'expected', 'read'), 144 | expected=( 145 | "--- File '/the/file', line 42:\n" 146 | "+++ Reading from \"{}/foo\":\n" 147 | "@@ -1 +1 @@\n" 148 | "-expected\n" 149 | "+actual" 150 | ).format(dir.path) 151 | ) 152 | 153 | def test_evaluate_write(self): 154 | with TempDirectory() as dir: 155 | self.check_evaluate( 156 | dir, 157 | FileBlock('foo', 'content', 'write'), 158 | expected=None 159 | ) 160 | dir.compare(['foo']) 161 | compare(dir.read('foo', 'ascii'), 'content') 162 | -------------------------------------------------------------------------------- /docs/exceptions.txt: -------------------------------------------------------------------------------- 1 | Testing exceptions 2 | ================== 3 | 4 | .. currentmodule:: testfixtures 5 | 6 | Testfixtures has tools to help when making assertions about exceptions that should be raised by 7 | a piece of code. 8 | 9 | The :class:`ShouldRaise` context manager 10 | ---------------------------------------- 11 | 12 | The :class:`ShouldRaise` context manager is the recommended way to make assertions about 13 | a piece of code that should raise exceptions. 14 | 15 | Suppose we wanted to test the following function to make sure that the 16 | right exception was raised: 17 | 18 | .. code-block:: python 19 | 20 | def the_thrower(throw=True): 21 | if throw: 22 | raise ValueError('Not good!') 23 | 24 | The following example shows how to test that the correct exception is 25 | raised: 26 | 27 | >>> from testfixtures import ShouldRaise 28 | >>> with ShouldRaise(ValueError('Not good!')): 29 | ... the_thrower() 30 | 31 | If the exception raised doesn't match the one expected, 32 | :class:`ShouldRaise` will raise an :class:`AssertionError` 33 | causing the tests in which it occurs to fail: 34 | 35 | >>> with ShouldRaise(ValueError('Is good!')): 36 | ... the_thrower() 37 | Traceback (most recent call last): 38 | ... 39 | AssertionError: ValueError('Is good!'...) (expected) != ValueError('Not good!'...) (raised) 40 | 41 | If you're not concerned about anything more than the type of the 42 | exception that's raised, you can check as follows: 43 | 44 | >>> from testfixtures import ShouldRaise 45 | >>> with ShouldRaise(ValueError): 46 | ... the_thrower() 47 | 48 | If you're feeling slack and just want to check that an exception is 49 | raised, but don't care about the type of that exception, the following 50 | will suffice: 51 | 52 | >>> from testfixtures import ShouldRaise 53 | >>> with ShouldRaise(): 54 | ... the_thrower() 55 | 56 | If no exception is raised by the code under test, :class:`ShouldRaise` 57 | will raise an :class:`AssertionError` to indicate this: 58 | 59 | >>> from testfixtures import ShouldRaise 60 | >>> with ShouldRaise(): 61 | ... 
the_thrower(throw=False) 62 | Traceback (most recent call last): 63 | ... 64 | testfixtures.shouldraise.NoException: No exception raised! 65 | 66 | :class:`ShouldRaise` has been implemented such that it can be 67 | used to test code that raises all exceptions, including :class:`SystemExit` and 68 | :class:`KeyboardInterrupt` exceptions. 69 | 70 | To help with :class:`SystemExit` and other exceptions that are 71 | tricky to construct yourself, :class:`ShouldRaise` instances have a 72 | :attr:`~ShouldRaise.raised` attribute. This will contain the actual 73 | exception raised and can be used to inspect parts of it: 74 | 75 | >>> import sys 76 | >>> from testfixtures import ShouldRaise 77 | >>> with ShouldRaise() as s: 78 | ... sys.exit(42) 79 | >>> s.raised.code 80 | 42 81 | 82 | The :func:`should_raise` decorator 83 | ----------------------------------------- 84 | 85 | If you are working in a traditional :mod:`unittest` environment and 86 | want to check that a particular test function raises an exception, you 87 | may find the decorator suits your needs better: 88 | 89 | .. code-block:: python 90 | 91 | from testfixtures import should_raise 92 | 93 | @should_raise(ValueError('Not good!')) 94 | def test_function(): 95 | the_thrower() 96 | 97 | This decorator behaves exactly as the :class:`ShouldRaise` context 98 | manager described in the documentation above. 99 | 100 | .. note:: 101 | 102 | It is slightly recommended that you use the context manager rather 103 | than the decorator in most cases. With the decorator, all exceptions 104 | raised within the decorated function will be checked, which can 105 | hinder test development. With the context manager, you can make 106 | assertions about only the exact lines of code that you expect to 107 | raise the exception. 108 | 109 | Exceptions that are conditionally raised 110 | ---------------------------------------- 111 | 112 | Some exceptions are only raised in certain versions of Python. For 113 | example, in Python 2, ``bytes()`` will turn both bytes and strings into 114 | bytes, while in Python 3, it will raise an exception when presented 115 | with a string. If you wish to make assertions that this behaviour is 116 | expected, you can use the ``unless`` option to :class:`ShouldRaise` 117 | as follows: 118 | 119 | .. code-block:: python 120 | 121 | import sys 122 | from testfixtures import ShouldRaise 123 | 124 | PY2 = sys.version_info[:2] < (3, 0) 125 | 126 | with ShouldRaise(TypeError, unless=PY2): 127 | bytes('something') 128 | 129 | .. note:: 130 | 131 | Do **not** abuse this functionality to make sloppy assertions. It is 132 | always better have two different tests that cover a case when an 133 | exception should be raised and a case where an exception should not 134 | be raised rather than using it above functionality. It is *only* 135 | provided to help in cases where something in the environment that 136 | cannot be mocked out or controlled influences whether or not an 137 | exception is raised. 
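
When the condition *can* be detected up front, as with the Python version
above, the two-test approach recommended in the note might be sketched as
follows, using :mod:`pytest` skip markers; the test names are purely
illustrative:

.. code-block:: python

    import sys
    import pytest
    from testfixtures import ShouldRaise

    PY2 = sys.version_info[:2] < (3, 0)

    @pytest.mark.skipif(PY2, reason="bytes() only rejects strings on Python 3")
    def test_bytes_rejects_str():
        with ShouldRaise(TypeError):
            bytes('something')

    @pytest.mark.skipif(not PY2, reason="bytes() accepts strings on Python 2")
    def test_bytes_accepts_str():
        bytes('something')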
138 | -------------------------------------------------------------------------------- /testfixtures/tests/test_roundcomparison.py: -------------------------------------------------------------------------------- 1 | from decimal import Decimal 2 | from testfixtures import RoundComparison as R, compare, ShouldRaise 3 | from unittest import TestCase 4 | 5 | 6 | class Tests(TestCase): 7 | 8 | def test_equal_yes_rhs(self): 9 | self.assertTrue(0.123457 == R(0.123456, 5)) 10 | 11 | def test_equal_yes_lhs(self): 12 | self.assertTrue(R(0.123456, 5) == 0.123457) 13 | 14 | def test_equal_no_rhs(self): 15 | self.assertFalse(0.123453 == R(0.123456, 5)) 16 | 17 | def test_equal_no_lhs(self): 18 | self.assertFalse(R(0.123456, 5) == 0.123453) 19 | 20 | def test_not_equal_yes_rhs(self): 21 | self.assertFalse(0.123457 != R(0.123456, 5)) 22 | 23 | def test_not_equal_yes_lhs(self): 24 | self.assertFalse(R(0.123456, 5) != 0.123457) 25 | 26 | def test_not_equal_no_rhs(self): 27 | self.assertTrue(0.123453 != R(0.123456, 5)) 28 | 29 | def test_not_equal_no_lhs(self): 30 | self.assertTrue(R(0.123456, 5) != 0.123453) 31 | 32 | def test_equal_in_sequence_rhs(self): 33 | self.assertEqual((1, 2, 0.123457), 34 | (1, 2, R(0.123456, 5))) 35 | 36 | def test_equal_in_sequence_lhs(self): 37 | self.assertEqual((1, 2, R(0.123456, 5)), 38 | (1, 2, 0.123457)) 39 | 40 | def test_not_equal_in_sequence_rhs(self): 41 | self.assertNotEqual((1, 2, 0.1236), 42 | (1, 2, R(0.123456, 5))) 43 | 44 | def test_not_equal_in_sequence_lhs(self): 45 | self.assertNotEqual((1, 2, R(0.123456, 5)), 46 | (1, 2, 0.1236)) 47 | 48 | def test_not_numeric_rhs(self): 49 | with ShouldRaise(TypeError): 50 | 'abc' == R(0.123456, 5) 51 | 52 | def test_not_numeric_lhs(self): 53 | with ShouldRaise(TypeError): 54 | R(0.123456, 5) == 'abc' 55 | 56 | def test_repr(self): 57 | compare('', 58 | repr(R(0.123456, 5))) 59 | 60 | def test_str(self): 61 | compare('', 62 | repr(R(0.123456, 5))) 63 | 64 | def test_str_negative(self): 65 | compare('', repr(R(123456, -2))) 66 | 67 | TYPE_ERROR_DECIMAL = TypeError( 68 | "Cannot compare with " 69 | ) 70 | 71 | def test_equal_yes_decimal_to_float_rhs(self): 72 | with ShouldRaise(self.TYPE_ERROR_DECIMAL): 73 | self.assertTrue(Decimal("0.123457") == R(0.123456, 5)) 74 | 75 | def test_equal_yes_decimal_to_float_lhs(self): 76 | with ShouldRaise(self.TYPE_ERROR_DECIMAL): 77 | self.assertTrue(R(0.123456, 5) == Decimal("0.123457")) 78 | 79 | def test_equal_no_decimal_to_float_rhs(self): 80 | with ShouldRaise(self.TYPE_ERROR_DECIMAL): 81 | self.assertFalse(Decimal("0.123453") == R(0.123456, 5)) 82 | 83 | def test_equal_no_decimal_to_float_lhs(self): 84 | with ShouldRaise(self.TYPE_ERROR_DECIMAL): 85 | self.assertFalse(R(0.123456, 5) == Decimal("0.123453")) 86 | 87 | TYPE_ERROR_FLOAT = TypeError( 88 | "Cannot compare with " 89 | ) 90 | 91 | def test_equal_yes_float_to_decimal_rhs(self): 92 | with ShouldRaise(self.TYPE_ERROR_FLOAT): 93 | self.assertTrue(0.123457 == R(Decimal("0.123456"), 5)) 94 | 95 | def test_equal_yes_float_to_decimal_lhs(self): 96 | with ShouldRaise(self.TYPE_ERROR_FLOAT): 97 | self.assertTrue(R(Decimal("0.123456"), 5) == 0.123457) 98 | 99 | def test_equal_no_float_to_decimal_rhs(self): 100 | with ShouldRaise(self.TYPE_ERROR_FLOAT): 101 | self.assertFalse(0.123453 == R(Decimal("0.123456"), 5)) 102 | 103 | def test_equal_no_float_to_decimal_lhs(self): 104 | with ShouldRaise(self.TYPE_ERROR_FLOAT): 105 | self.assertFalse(R(Decimal("0.123456"), 5) == 0.123453) 106 | 107 | def test_integer_float(self): 108 | with 
ShouldRaise(TypeError): 109 | 1 == R(1.000001, 5) 110 | 111 | def test_float_integer(self): 112 | with ShouldRaise(TypeError): 113 | R(1.000001, 5) == 1 114 | 115 | def test_equal_yes_integer_other_rhs(self): 116 | self.assertTrue(10 == R(11, -1)) 117 | 118 | def test_equal_yes_integer_lhs(self): 119 | self.assertTrue(R(11, -1) == 10) 120 | 121 | def test_equal_no_integer_rhs(self): 122 | self.assertFalse(10 == R(16, -1)) 123 | 124 | def test_equal_no_integer_lhs(self): 125 | self.assertFalse(R(16, -1) == 10) 126 | 127 | def test_equal_integer_zero_precision(self): 128 | self.assertTrue(1 == R(1, 0)) 129 | 130 | def test_equal_yes_negative_precision(self): 131 | self.assertTrue(149.123 == R(101.123, -2)) 132 | 133 | def test_equal_no_negative_precision(self): 134 | self.assertFalse(149.123 == R(150.001, -2)) 135 | 136 | def test_decimal_yes_rhs(self): 137 | self.assertTrue(Decimal('0.123457') == R(Decimal('0.123456'), 5)) 138 | 139 | def test_decimal_yes_lhs(self): 140 | self.assertTrue(R(Decimal('0.123456'), 5) == Decimal('0.123457')) 141 | 142 | def test_decimal_no_rhs(self): 143 | self.assertFalse(Decimal('0.123453') == R(Decimal('0.123456'), 5)) 144 | 145 | def test_decimal_no_lhs(self): 146 | self.assertFalse(R(Decimal('0.123456'), 5) == Decimal('0.123453')) 147 | -------------------------------------------------------------------------------- /testfixtures/shouldraise.py: -------------------------------------------------------------------------------- 1 | from contextlib import contextmanager 2 | from functools import wraps 3 | from types import TracebackType 4 | from typing import Callable, TypeAlias, Iterator, Self, ParamSpec, TypeVar, Generic 5 | 6 | from testfixtures import diff, compare 7 | from .comparison import split_repr 8 | 9 | 10 | param_docs = """ 11 | 12 | :param exception: This can be one of the following: 13 | 14 | * `None`, indicating that an exception must be 15 | raised, but the type is unimportant. 16 | 17 | * An exception class, indicating that the type 18 | of the exception is important but not the 19 | parameters it is created with. 20 | 21 | * An exception instance, indicating that an 22 | exception exactly matching the one supplied 23 | should be raised. 24 | 25 | :param unless: Can be passed a boolean that, when ``True`` indicates that 26 | no exception is expected. This is useful when checking 27 | that exceptions are only raised on certain versions of 28 | Python. 29 | """ 30 | 31 | 32 | class NoException(AssertionError): 33 | """ 34 | A marker class indicating no exception has been raised. 35 | 36 | .. currentmodule:: testfixtures 37 | 38 | :attr:`ShouldRaise.raised` is set to an instance of this class unless an 39 | exception has otherwise been seen. 40 | """ 41 | 42 | def __init__(self) -> None: 43 | super().__init__('No exception raised!') 44 | 45 | 46 | E = TypeVar("E", bound=BaseException) 47 | 48 | 49 | class ShouldRaise(Generic[E]): 50 | __doc__ = """ 51 | This context manager is used to assert that an exception is raised 52 | within the context it is managing. 53 | """ + param_docs 54 | 55 | #: The exception captured by the context manager. 56 | #: Can be used to inspect specific attributes of the exception. 
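    #: Until an exception is seen, this holds a :class:`NoException` instance.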
57 | raised: E = NoException() # type: ignore[assignment] 58 | 59 | def __init__(self, exception: E | type[E] | None = None, unless: bool | None = False): 60 | self.exception = exception 61 | self.expected = not unless 62 | 63 | def __enter__(self) -> Self: 64 | return self 65 | 66 | def __exit__( 67 | self, 68 | exc_type: type[BaseException] | None, 69 | actual: BaseException | None, 70 | traceback: TracebackType | None, 71 | ) -> bool: 72 | __tracebackhide__ = True 73 | self.raised = actual or NoException() # type: ignore[assignment] 74 | if self.expected: 75 | if self.exception: 76 | actual_: type[BaseException] | BaseException | None = actual 77 | if actual is not None: 78 | if isinstance(self.exception, type): 79 | actual_ = type(actual) 80 | if self.exception is not actual_: 81 | return False 82 | else: 83 | if type(self.exception) is not type(actual): 84 | return False 85 | compare(self.exception, 86 | actual_, 87 | x_label='expected', 88 | y_label='raised') 89 | elif not actual: 90 | raise NoException() 91 | elif actual: 92 | return False 93 | return True 94 | 95 | 96 | P = ParamSpec("P") 97 | T = TypeVar("T") 98 | 99 | 100 | class should_raise(Generic[E]): 101 | __doc__ = """ 102 | A decorator to assert that the decorated function will raise 103 | an exception. An exception class or exception instance may be 104 | passed to check exactly what exception will be 105 | raised. 106 | """ + param_docs 107 | 108 | def __init__(self, exception: E | type[E] | None = None, unless: bool | None = None): 109 | self.exception = exception 110 | self.unless = unless 111 | 112 | def __call__(self, target: Callable[P, T]) -> Callable[P, None]: 113 | 114 | @wraps(target) 115 | def _should_raise_wrapper(*args: P.args, **kw: P.kwargs) -> None: 116 | with ShouldRaise(self.exception, self.unless): 117 | target(*args, **kw) 118 | 119 | return _should_raise_wrapper 120 | 121 | 122 | @contextmanager 123 | def ShouldAssert(expected_text: str, show_whitespace: bool = False) -> Iterator[None]: 124 | """ 125 | A context manager to check that an :class:`AssertionError` 126 | is raised and its text is as expected. 127 | 128 | :param show_whitespace: If `True`, then whitespace characters in 129 | multi-line strings will be replaced with their 130 | representations. 131 | """ 132 | try: 133 | yield 134 | except AssertionError as e: 135 | actual_text = str(e) 136 | if expected_text != actual_text: 137 | if show_whitespace: 138 | expected_text = split_repr(expected_text) 139 | actual_text = split_repr(actual_text) 140 | raise AssertionError(diff(expected_text, actual_text, 141 | x_label='expected', y_label='actual')) 142 | else: 143 | raise AssertionError('Expected AssertionError(%r), None raised!' % 144 | expected_text) 145 | -------------------------------------------------------------------------------- /testfixtures/tests/test_outputcapture.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from subprocess import call 3 | from unittest import TestCase 4 | 5 | from _pytest.capture import CaptureFixture 6 | from testfixtures import OutputCapture, compare, StringComparison 7 | from .test_compare import CompareHelper 8 | 9 | 10 | class TestOutputCapture(CompareHelper, TestCase): 11 | 12 | def test_compare_strips(self) -> None: 13 | with OutputCapture() as o: 14 | print(' Bar! ') 15 | o.compare('Bar!') 16 | 17 | def test_compare_doesnt_strip(self) -> None: 18 | with OutputCapture(strip_whitespace=False) as o: 19 | print(' Bar! 
') 20 | self.check_raises( 21 | '\tBar!', 22 | compare=o.compare, 23 | message="'\\tBar!' (expected) != ' Bar! \\n' (actual)", 24 | ) 25 | 26 | def test_stdout_and_stderr(self) -> None: 27 | with OutputCapture() as o: 28 | print('hello', file=sys.stdout) 29 | print('out', file=sys.stderr) 30 | print('there', file=sys.stdout) 31 | print('now', file=sys.stderr) 32 | o.compare("hello\nout\nthere\nnow\n") 33 | 34 | def test_unicode(self) -> None: 35 | with OutputCapture() as o: 36 | print(u'\u65e5', file=sys.stdout) 37 | o.compare(u'\u65e5\n') 38 | 39 | def test_separate_capture(self) -> None: 40 | with OutputCapture(separate=True) as o: 41 | print('hello', file=sys.stdout) 42 | print('out', file=sys.stderr) 43 | print('there', file=sys.stdout) 44 | print('now', file=sys.stderr) 45 | o.compare(stdout="hello\nthere\n", 46 | stderr="out\nnow\n") 47 | 48 | def test_compare_both_at_once(self) -> None: 49 | with OutputCapture(separate=True) as o: 50 | print('hello', file=sys.stdout) 51 | print('out', file=sys.stderr) 52 | self.check_raises( 53 | stdout="out\n", 54 | stderr="hello\n", 55 | compare=o.compare, 56 | message=( 57 | 'dict not as expected:\n' 58 | '\n' 59 | 'values differ:\n' 60 | "'stderr': 'hello' (expected) != 'out' (actual)\n" 61 | "'stdout': 'out' (expected) != 'hello' (actual)\n" 62 | '\n' 63 | "While comparing ['stderr']: 'hello' (expected) != 'out' (actual)\n" 64 | '\n' 65 | "While comparing ['stdout']: 'out' (expected) != 'hello' (actual)" 66 | ), 67 | ) 68 | 69 | def test_original_restore(self) -> None: 70 | o_out, o_err = sys.stdout, sys.stderr 71 | with OutputCapture() as o: 72 | self.assertFalse(sys.stdout is o_out) 73 | self.assertFalse(sys.stderr is o_err) 74 | self.assertTrue(sys.stdout is o_out) 75 | self.assertTrue(sys.stderr is o_err) 76 | 77 | def test_double_disable(self) -> None: 78 | o_out, o_err = sys.stdout, sys.stderr 79 | with OutputCapture() as o: 80 | self.assertFalse(sys.stdout is o_out) 81 | self.assertFalse(sys.stderr is o_err) 82 | o.disable() 83 | self.assertTrue(sys.stdout is o_out) 84 | self.assertTrue(sys.stderr is o_err) 85 | o.disable() 86 | self.assertTrue(sys.stdout is o_out) 87 | self.assertTrue(sys.stderr is o_err) 88 | self.assertTrue(sys.stdout is o_out) 89 | self.assertTrue(sys.stderr is o_err) 90 | 91 | def test_double_enable(self) -> None: 92 | o_out, o_err = sys.stdout, sys.stderr 93 | with OutputCapture() as o: 94 | o.disable() 95 | self.assertTrue(sys.stdout is o_out) 96 | self.assertTrue(sys.stderr is o_err) 97 | o.enable() 98 | self.assertFalse(sys.stdout is o_out) 99 | self.assertFalse(sys.stderr is o_err) 100 | o.enable() 101 | self.assertFalse(sys.stdout is o_out) 102 | self.assertFalse(sys.stderr is o_err) 103 | self.assertTrue(sys.stdout is o_out) 104 | self.assertTrue(sys.stderr is o_err) 105 | 106 | def test_compare_expected_is_stringcomparison(self) -> None: 107 | with OutputCapture() as output: 108 | print('foo') 109 | output.compare(StringComparison(r'^foo\Z')) 110 | 111 | def test_compare_stdout_and_stdderr_are_stringcomparisons(self) -> None: 112 | with OutputCapture(separate=True) as output: 113 | print('hello', file=sys.stdout) 114 | print('world', file=sys.stderr) 115 | output.compare(stdout=StringComparison(r'^hello\Z'), stderr=StringComparison(r'^world\Z')) 116 | 117 | 118 | class TestOutputCaptureWithDescriptors: 119 | 120 | def test_fd(self, capfd: CaptureFixture) -> None: 121 | with capfd.disabled(), OutputCapture(fd=True) as o: 122 | call([sys.executable, '-c', "import sys; sys.stdout.write('out')"]) 123 | 
call([sys.executable, '-c', "import sys; sys.stderr.write('err')"]) 124 | compare(o.captured, expected='outerr') 125 | o.compare(expected='outerr') 126 | 127 | def test_fd_separate(self, capfd: CaptureFixture) -> None: 128 | with capfd.disabled(), OutputCapture(fd=True, separate=True) as o: 129 | call([sys.executable, '-c', "import sys; sys.stdout.write('out')"]) 130 | call([sys.executable, '-c', "import sys; sys.stderr.write('err')"]) 131 | compare(o.captured, expected='') 132 | o.compare(stdout='out', stderr='err') 133 | -------------------------------------------------------------------------------- /testfixtures/twisted.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tools for helping to test Twisted applications. 3 | """ 4 | from pprint import pformat 5 | from typing import Sequence, Callable, Any, TypeAlias, Self 6 | from unittest import TestCase 7 | 8 | from constantly import NamedConstant 9 | from twisted.logger import globalLogPublisher, formatEvent, LogLevel, ILogObserver, LogEvent 10 | 11 | from . import compare 12 | import zope.interface 13 | 14 | @zope.interface.implementer(ILogObserver) 15 | class LogCapture: 16 | """ 17 | A helper for capturing messages logged using Twisted's loggers. 18 | 19 | :param fields: 20 | A sequence of field names that :meth:`~LogCapture.check` will use to build 21 | "actual" events to compare against the expected events passed in. 22 | If items are strings, they will be treated as keys into the Twisted logging event. 23 | If they are callable, they will be called with the event as their only parameter. 24 | If only one field is specified, "actual" events will just be that one field; 25 | otherwise they will be a tuple of the specified fields. 26 | """ 27 | 28 | def __init__(self, fields: Sequence[str | Callable] = ('log_level', formatEvent,)): 29 | #: The list of events captured. 30 | self.events: list[LogEvent] = [] 31 | self.fields = fields 32 | 33 | def __call__(self, event: LogEvent) -> None: 34 | self.events.append(event) 35 | 36 | def install(self) -> None: 37 | "Start capturing." 38 | self.original_observers = globalLogPublisher._observers 39 | globalLogPublisher._observers = [self] 40 | 41 | def uninstall(self) -> None: 42 | "Stop capturing." 43 | globalLogPublisher._observers = self.original_observers 44 | 45 | def check(self, *expected: LogEvent, order_matters: bool = True) -> None: 46 | """ 47 | Check captured events against those supplied. Please see the ``fields`` parameter 48 | to the constructor to see how "actual" events are built. 49 | 50 | :param order_matters: 51 | This defaults to ``True``. If ``False``, the order of expected logging versus 52 | actual logging will be ignored. 
53 | """ 54 | actual_event: Any 55 | actual = [] 56 | for event in self.events: 57 | actual_event = tuple(field(event) if callable(field) else event.get(field) 58 | for field in self.fields) 59 | if len(actual_event) == 1: 60 | actual_event = actual_event[0] 61 | actual.append(actual_event) 62 | if order_matters: 63 | compare(expected=expected, actual=actual) 64 | else: 65 | expected_ = list(expected) 66 | matched = [] 67 | unmatched = [] 68 | for entry in actual: 69 | try: 70 | index = expected_.index(entry) 71 | except ValueError: 72 | unmatched.append(entry) 73 | else: 74 | matched.append(expected_.pop(index)) 75 | if expected_: 76 | raise AssertionError(( 77 | 'entries not as expected:\n\n' 78 | 'expected and found:\n%s\n\n' 79 | 'expected but not found:\n%s\n\n' 80 | 'other entries:\n%s' 81 | ) % (pformat(matched), pformat(expected_), pformat(unmatched))) 82 | 83 | def check_failure_text(self, expected: str, index: int = -1, attribute: str = 'value') -> None: 84 | """ 85 | Check the string representation of an attribute of a logged ``Failure`` is as expected. 86 | 87 | :param expected: The expected string representation. 88 | :param index: The index into :attr:`events` where the failure should have been logged. 89 | :param attribute: The attribute of the failure of which to find the string representation. 90 | """ 91 | compare(expected, actual=str(getattr(self.events[index]['log_failure'], attribute))) 92 | 93 | def raise_logged_failure(self, start_index: int = 0) -> None: 94 | """ 95 | A debugging tool that raises the first failure encountered in captured logging. 96 | 97 | :param start_index: The index into :attr:`events` from where to start looking for failures. 98 | """ 99 | for event in self.events[start_index:]: 100 | failure = event.get('log_failure') 101 | if failure: 102 | raise failure 103 | 104 | @classmethod 105 | def make(cls, testcase: TestCase, **kw: Sequence[str | Callable]) -> Self: 106 | """ 107 | Instantiate, install and add a cleanup for a :class:`LogCapture`. 108 | 109 | :param testcase: This must be an instance of :class:`twisted.trial.unittest.TestCase`. 110 | :param kw: Any other parameters are passed directly to the :class:`LogCapture` constructor. 111 | :return: The :class:`LogCapture` instantiated by this method. 112 | """ 113 | capture = cls(**kw) 114 | capture.install() 115 | testcase.addCleanup(capture.uninstall) 116 | return capture 117 | 118 | 119 | #: Short reference to Twisted's ``LogLevel.debug`` 120 | DEBUG: NamedConstant = LogLevel.debug 121 | #: Short reference to Twisted's ``LogLevel.info`` 122 | INFO: NamedConstant = LogLevel.info 123 | #: Short reference to Twisted's ``LogLevel.warn`` 124 | WARN: NamedConstant = LogLevel.warn 125 | #: Short reference to Twisted's ``LogLevel.error`` 126 | ERROR: NamedConstant = LogLevel.error 127 | #: Short reference to Twisted's ``LogLevel.critical`` 128 | CRITICAL: NamedConstant = LogLevel.critical 129 | -------------------------------------------------------------------------------- /testfixtures/outputcapture.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | from io import StringIO 4 | from tempfile import TemporaryFile 5 | from typing import Self, Any, IO 6 | 7 | from .comparison import StringComparison, compare 8 | 9 | 10 | class OutputCapture: 11 | """ 12 | A context manager for capturing output to the 13 | :any:`sys.stdout` and :any:`sys.stderr` streams. 
14 | 15 | :param separate: If ``True``, ``stdout`` and ``stderr`` will be captured 16 | separately and their expected values must be passed to 17 | :meth:`~OutputCapture.compare`. 18 | 19 | :param fd: If ``True``, the underlying file descriptors will be captured, 20 | rather than just the attributes on :mod:`sys`. This allows 21 | you to capture things like subprocesses that write directly 22 | to the file descriptors, but is more invasive, so only use it 23 | when you need it. 24 | 25 | :param strip_whitespace: 26 | When ``True``, which is the default, leading and trailing whitespace 27 | is trimmed from both the expected and actual values when comparing. 28 | 29 | .. note:: If ``separate`` is passed as ``True``, 30 | :attr:`OutputCapture.captured` will be an empty string. 31 | """ 32 | 33 | output: IO 34 | stdout: IO 35 | stderr: IO 36 | 37 | original_stdout: IO[str] | int | None = None 38 | original_stderr: IO[str] | int | None = None 39 | 40 | def __init__(self, separate: bool = False, fd: bool = False, strip_whitespace: bool = True): 41 | self.separate = separate 42 | self.fd = fd 43 | self.strip_whitespace = strip_whitespace 44 | 45 | def __enter__(self) -> Self: 46 | if self.fd: 47 | self.output = TemporaryFile() 48 | self.stdout = TemporaryFile() 49 | self.stderr = TemporaryFile() 50 | else: 51 | self.output = StringIO() 52 | self.stdout = StringIO() 53 | self.stderr = StringIO() 54 | self.enable() 55 | return self 56 | 57 | def __exit__(self, *args: Any) -> None: 58 | self.disable() 59 | 60 | def disable(self) -> None: 61 | "Disable the output capture if it is enabled." 62 | if self.fd: 63 | for original, current in ( 64 | (self.original_stdout, sys.stdout), 65 | (self.original_stderr, sys.stderr), 66 | ): 67 | os.dup2(original, current.fileno()) # type: ignore[arg-type] 68 | os.close(original) # type: ignore[arg-type] 69 | 70 | else: 71 | sys.stdout = self.original_stdout 72 | sys.stderr = self.original_stderr 73 | 74 | def enable(self) -> None: 75 | "Enable the output capture if it is disabled." 76 | if self.original_stdout is None: 77 | if self.fd: 78 | self.original_stdout = os.dup(sys.stdout.fileno()) 79 | self.original_stderr = os.dup(sys.stderr.fileno()) 80 | else: 81 | self.original_stdout = sys.stdout 82 | self.original_stderr = sys.stderr 83 | if self.separate: 84 | if self.fd: 85 | os.dup2(self.stdout.fileno(), sys.stdout.fileno()) 86 | os.dup2(self.stderr.fileno(), sys.stderr.fileno()) 87 | else: 88 | sys.stdout = self.stdout 89 | sys.stderr = self.stderr 90 | else: 91 | if self.fd: 92 | os.dup2(self.output.fileno(), sys.stdout.fileno()) 93 | os.dup2(self.output.fileno(), sys.stderr.fileno()) 94 | else: 95 | sys.stdout = sys.stderr = self.output 96 | 97 | def _read(self, stream: IO | StringIO) -> str: 98 | if self.fd: 99 | stream.seek(0) 100 | return stream.read().decode() # type: ignore[union-attr] 101 | else: 102 | return stream.getvalue() # type: ignore[union-attr] 103 | 104 | @property 105 | def captured(self) -> str: 106 | "A property containing any output that has been captured so far." 107 | return self._read(self.output) 108 | 109 | def compare( 110 | self, 111 | expected: str | StringComparison = '', 112 | stdout: str | StringComparison = '', 113 | stderr: str | StringComparison = '' 114 | ) -> None: 115 | """ 116 | Compare the captured output to that expected. If the output is 117 | not the same, an :class:`AssertionError` will be raised. 118 | 119 | :param expected: A string containing the expected combined output 120 | of ``stdout`` and ``stderr``. 
121 | 122 | :param stdout: A string containing the expected output to ``stdout``. 123 | 124 | :param stderr: A string containing the expected output to ``stderr``. 125 | """ 126 | expected_mapping = {} 127 | actual_mapping = {} 128 | for prefix, _expected, captured in ( 129 | ('captured', expected, self.captured), 130 | ('stdout', stdout, self._read(self.stdout)), 131 | ('stderr', stderr, self._read(self.stderr)), 132 | ): 133 | if self.strip_whitespace: 134 | if isinstance(_expected, str): 135 | _expected = _expected.strip() 136 | captured = captured.strip() 137 | if _expected != captured: 138 | expected_mapping[prefix] = _expected 139 | actual_mapping[prefix] = captured 140 | if len(expected_mapping) == 1: 141 | compare(expected=tuple(expected_mapping.values())[0], 142 | actual=tuple(actual_mapping.values())[0]) 143 | compare(expected=expected_mapping, actual=actual_mapping) 144 | -------------------------------------------------------------------------------- /testfixtures/tests/test_rangecomparison.py: -------------------------------------------------------------------------------- 1 | from decimal import Decimal 2 | from testfixtures import RangeComparison as R, ShouldRaise, compare 3 | from unittest import TestCase 4 | 5 | 6 | class Tests(TestCase): 7 | 8 | def test_equal_yes_rhs(self): 9 | self.assertTrue(5 == R(2, 5)) 10 | 11 | def test_equal_yes_lhs(self): 12 | self.assertTrue(R(2, 5) == 2) 13 | 14 | def test_equal_no_rhs(self): 15 | self.assertFalse(5 == R(2, 4)) 16 | 17 | def test_equal_no_lhs(self): 18 | self.assertFalse(R(2, 3) == 5) 19 | 20 | def test_not_equal_yes_rhs(self): 21 | self.assertTrue(5 != R(2, 2)) 22 | 23 | def test_not_equal_yes_lhs(self): 24 | self.assertTrue(R(2, 4) != 1) 25 | 26 | def test_not_equal_no_rhs(self): 27 | self.assertFalse(5 != R(-10, 10)) 28 | 29 | def test_not_equal_no_lhs(self): 30 | self.assertFalse(R(2, 5) != 2) 31 | 32 | def test_equal_in_sequence_rhs(self): 33 | self.assertEqual((1, 2, 5), 34 | (1, 2, R(2, 5))) 35 | 36 | def test_equal_in_sequence_lhs(self): 37 | self.assertEqual((1, 2, R(2, 5)), 38 | (1, 2, 5)) 39 | 40 | def test_not_equal_in_sequence_rhs(self): 41 | self.assertNotEqual((1, 2, 5), 42 | (1, 2, R(2, 4))) 43 | 44 | def test_not_equal_in_sequence_lhs(self): 45 | self.assertNotEqual((1, 2, R(2, 4)), 46 | (1, 2, 5)) 47 | 48 | def test_not_numeric_rhs(self): 49 | with ShouldRaise(TypeError): 50 | 'abc' == R(2, 5) 51 | with ShouldRaise(TypeError): 52 | {} == R(2, 5) 53 | with ShouldRaise(TypeError): 54 | [] == R(2, 5) 55 | 56 | def test_not_numeric_lhs(self): 57 | with ShouldRaise(TypeError): 58 | R(2, 5) == 'abc' 59 | with ShouldRaise(TypeError): 60 | R(2, 5) == {} 61 | with ShouldRaise(TypeError): 62 | R(2, 5) == [] 63 | 64 | def test_repr(self): 65 | compare('', 66 | repr(R(2, 5))) 67 | 68 | def test_str(self): 69 | compare('', 70 | str(R(2, 5))) 71 | 72 | def test_str_negative(self): 73 | compare('', repr(R(2, 5))) 74 | 75 | def test_equal_yes_decimal_lhs(self): 76 | self.assertTrue(R(2, 5) == Decimal(3)) 77 | 78 | def test_equal_yes_decimal_rhs(self): 79 | self.assertTrue(Decimal(3) == R(2, 5)) 80 | 81 | def test_equal_no_decimal_lhs(self): 82 | self.assertFalse(R(2, 5) == Decimal(1.0)) 83 | 84 | def test_equal_no_decimal_rhs(self): 85 | self.assertFalse(Decimal(1.0) == R(2, 5)) 86 | 87 | def test_equal_yes_float_lhs(self): 88 | self.assertTrue(R(2, 5) == 3.0) 89 | 90 | def test_equal_yes_float_rhs(self): 91 | self.assertTrue(3.0 == R(2, 5)) 92 | 93 | def test_equal_no_float_lhs(self): 94 | self.assertFalse(R(2, 5) == 1.0) 95 
| 96 | def test_equal_no_float_rhs(self): 97 | self.assertFalse(1.0 == R(2, 5)) 98 | 99 | def test_equal_yes_decimal_in_range_lhs(self): 100 | self.assertTrue(R(Decimal(1), 5) == 3) 101 | self.assertTrue(R(1, Decimal(5)) == 3) 102 | self.assertTrue(R(Decimal(1), Decimal(5)) == 3) 103 | 104 | def test_equal_yes_decimal_in_range_rhs(self): 105 | self.assertTrue(3 == R(Decimal(1), 5)) 106 | self.assertTrue(3 == R(1, Decimal(5))) 107 | self.assertTrue(3 == R(Decimal(1), Decimal(5))) 108 | 109 | def test_equal_no_decimal_in_range_lhs(self): 110 | self.assertFalse(R(Decimal(1), 5) == 6) 111 | self.assertFalse(R(1, Decimal(5)) == 6) 112 | self.assertFalse(R(Decimal(1), Decimal(5)) == 6) 113 | 114 | def test_equal_no_decimal_in_range_rhs(self): 115 | self.assertFalse(6 == R(Decimal(1), 5)) 116 | self.assertFalse(6 == R(1, Decimal(5))) 117 | self.assertFalse(6 == R(Decimal(1), Decimal(5))) 118 | 119 | def test_equal_yes_float_in_range_lhs(self): 120 | self.assertTrue(R(1.0, 5) == 3) 121 | self.assertTrue(R(1, 5.0) == 3) 122 | self.assertTrue(R(1.0, 5.0) == 3) 123 | 124 | def test_equal_yes_float_in_range_rhs(self): 125 | self.assertTrue(3 == R(1.0, 5)) 126 | self.assertTrue(3 == R(1, 5.0)) 127 | self.assertTrue(3 == R(1.0, 5.0)) 128 | 129 | def test_equal_no_float_in_range_lhs(self): 130 | self.assertFalse(R(1.0, 5) == 6) 131 | self.assertFalse(R(1, 5.0) == 6) 132 | self.assertFalse(R(1.0, 5.0) == 6) 133 | 134 | def test_equal_no_float_in_range_rhs(self): 135 | self.assertFalse(6 == R(1.0, 5)) 136 | self.assertFalse(6 == R(1, 5.0)) 137 | self.assertFalse(6 == R(1.0, 5.0)) 138 | 139 | def test_equal_yes_negative_lhs(self): 140 | self.assertTrue(R(-5, 5) == -3) 141 | self.assertTrue(R(-10, -5) == -7) 142 | 143 | def test_equal_yes_negative_rhs(self): 144 | self.assertTrue(-2 == R(-5, 5)) 145 | self.assertTrue(-7 == R(-10, -5)) 146 | 147 | def test_equal_no_negative_lhs(self): 148 | self.assertFalse(R(-5, 5) == -10) 149 | self.assertFalse(R(-10, -5) == -3) 150 | 151 | def test_equal_no_negative_rhs(self): 152 | self.assertFalse(-10 == R(-5, 5)) 153 | self.assertFalse(-30 == R(-10, -5)) 154 | 155 | def test_equal_yes_no_range_lhs(self): 156 | self.assertTrue(R(0, 0) == 0) 157 | self.assertTrue(R(2, 2) == 2) 158 | self.assertTrue(R(-1, -1) == -1) 159 | 160 | def test_equal_yes_no_range_rhs(self): 161 | self.assertTrue(0 == R(0, 0)) 162 | self.assertTrue(2 == R(2, 2)) 163 | self.assertTrue(-1 == R(-1, -1)) 164 | 165 | def test_equal_no_no_range_lhs(self): 166 | self.assertFalse(R(0, 0) == 1) 167 | self.assertFalse(R(2, 2) == 1) 168 | self.assertFalse(R(-1, -1) == 11) 169 | 170 | def test_equal_no_no_range_rhs(self): 171 | self.assertFalse(1 == R(0, 0)) 172 | self.assertFalse(1 == R(2, 2)) 173 | self.assertFalse(1 == R(-1, -1)) 174 | -------------------------------------------------------------------------------- /testfixtures/tests/test_twisted.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | pytest.importorskip("twisted") 4 | 5 | from twisted.logger import Logger, formatEvent 6 | from twisted.python.failure import Failure 7 | from twisted.trial.unittest import TestCase 8 | 9 | from testfixtures import compare, ShouldRaise, StringComparison as S, ShouldAssert 10 | from testfixtures.twisted import LogCapture, INFO 11 | 12 | log = Logger() 13 | 14 | 15 | class TestLogCapture(TestCase): 16 | 17 | def test_simple(self): 18 | capture = LogCapture.make(self) 19 | log.info('er, {greeting}', greeting='hi') 20 | capture.check((INFO, 'er, hi')) 21 | 22 
| def test_captured(self): 23 | capture = LogCapture.make(self) 24 | log.info('er, {greeting}', greeting='hi') 25 | assert len(capture.events) == 1 26 | compare(capture.events[0]['log_namespace'], expected='testfixtures.tests.test_twisted') 27 | 28 | def test_fields(self): 29 | capture = LogCapture.make(self, fields=('a', 'b')) 30 | log.info('{a}, {b}', a=1, b=2) 31 | log.info('{a}, {b}', a=3, b=4) 32 | capture.check( 33 | [1, 2], 34 | [3, 4], 35 | ) 36 | 37 | def test_field(self): 38 | capture = LogCapture.make(self, fields=(formatEvent,)) 39 | log.info('er, {greeting}', greeting='hi') 40 | capture.check('er, hi') 41 | 42 | def test_check_failure_test_minimal(self): 43 | capture = LogCapture.make(self) 44 | try: 45 | raise Exception('all gone wrong') 46 | except: 47 | log.failure('oh dear') 48 | capture.check_failure_text('all gone wrong') 49 | self.flushLoggedErrors() 50 | 51 | def test_check_failure_test_maximal(self): 52 | capture = LogCapture.make(self) 53 | try: 54 | raise TypeError('all gone wrong') 55 | except: 56 | log.failure('oh dear') 57 | log.info("don't look at me...") 58 | capture.check_failure_text(str(TypeError), index=0, attribute='type') 59 | self.flushLoggedErrors() 60 | self.flushLoggedErrors() 61 | 62 | def test_raise_logged_failure(self): 63 | capture = LogCapture.make(self) 64 | try: 65 | raise TypeError('all gone wrong') 66 | except: 67 | log.failure('oh dear') 68 | with ShouldRaise(Failure) as s: 69 | capture.raise_logged_failure() 70 | compare(s.raised.value, expected=TypeError('all gone wrong')) 71 | self.flushLoggedErrors() 72 | 73 | def test_raise_later_logged_failure(self): 74 | capture = LogCapture.make(self) 75 | try: 76 | raise ValueError('boom!') 77 | except: 78 | log.failure('oh dear') 79 | try: 80 | raise TypeError('all gone wrong') 81 | except: 82 | log.failure('what now?!') 83 | with ShouldRaise(Failure) as s: 84 | capture.raise_logged_failure(start_index=1) 85 | compare(s.raised.value, expected=TypeError('all gone wrong')) 86 | self.flushLoggedErrors() 87 | 88 | def test_order_doesnt_matter_ok(self): 89 | capture = LogCapture.make(self) 90 | log.info('Failed to send BAR') 91 | log.info('Sent FOO, length 1234') 92 | log.info('Sent 1 Messages') 93 | capture.check( 94 | (INFO, S(r'Sent FOO, length \d+')), 95 | (INFO, 'Failed to send BAR'), 96 | (INFO, 'Sent 1 Messages'), 97 | order_matters=False 98 | ) 99 | 100 | def test_order_doesnt_matter_failure(self): 101 | capture = LogCapture.make(self) 102 | log.info('Failed to send BAR') 103 | log.info('Sent FOO, length 1234') 104 | log.info('Sent 1 Messages') 105 | with ShouldAssert( 106 | "entries not as expected:\n" 107 | "\n" 108 | "expected and found:\n" 109 | "[(, 'Failed to send BAR'), (, 'Sent 1 Messages')]\n" 110 | "\n" 111 | "expected but not found:\n" 112 | "[(, )]\n" 113 | "\n" 114 | "other entries:\n" 115 | "[(, 'Sent FOO, length 1234')]" 116 | ): 117 | capture.check( 118 | (INFO, S('Sent FOO, length abc')), 119 | (INFO, 'Failed to send BAR'), 120 | (INFO, 'Sent 1 Messages'), 121 | order_matters=False 122 | ) 123 | 124 | def test_order_doesnt_matter_extra_in_expected(self): 125 | capture = LogCapture.make(self) 126 | log.info('Failed to send BAR') 127 | log.info('Sent FOO, length 1234') 128 | with ShouldAssert( 129 | "entries not as expected:\n" 130 | "\n" 131 | "expected and found:\n" 132 | "[(, 'Failed to send BAR'),\n" 133 | " (, )]\n" 134 | "\n" 135 | "expected but not found:\n" 136 | "[(, 'Sent 1 Messages')]\n" 137 | "\n" 138 | "other entries:\n" 139 | "[]" 140 | ): 141 | capture.check( 142 | 
(INFO, S('Sent FOO, length 1234')), 143 | (INFO, 'Failed to send BAR'), 144 | (INFO, 'Sent 1 Messages'), 145 | order_matters=False 146 | ) 147 | 148 | def test_order_doesnt_matter_extra_in_actual(self): 149 | capture = LogCapture.make(self) 150 | log.info('Failed to send BAR') 151 | log.info('Sent FOO, length 1234') 152 | log.info('Sent 1 Messages') 153 | with ShouldAssert( 154 | "entries not as expected:\n" 155 | "\n" 156 | "expected and found:\n" 157 | "[(, 'Failed to send BAR'), (, 'Sent 1 Messages')]\n" 158 | "\n" 159 | "expected but not found:\n" 160 | "[(, )]\n" 161 | "\n" 162 | "other entries:\n" 163 | "[(, 'Sent FOO, length 1234')]" 164 | ): 165 | capture.check( 166 | (INFO, S('Sent FOO, length abc')), 167 | (INFO, 'Failed to send BAR'), 168 | (INFO, 'Sent 1 Messages'), 169 | order_matters=False 170 | ) 171 | -------------------------------------------------------------------------------- /testfixtures/tests/test_shouldwarn.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | 3 | import warnings 4 | 5 | from testfixtures import ( 6 | ShouldWarn, compare, ShouldRaise, ShouldNotWarn, 7 | Comparison as C 8 | ) 9 | from testfixtures.shouldraise import ShouldAssert 10 | 11 | 12 | class ShouldWarnTests(TestCase): 13 | 14 | def test_warn_expected(self): 15 | with warnings.catch_warnings(record=True) as backstop: 16 | with ShouldWarn(UserWarning('foo')): 17 | warnings.warn('foo') 18 | compare(len(backstop), expected=0) 19 | 20 | def test_warn_not_expected(self): 21 | with ShouldAssert( 22 | "\n\n" 23 | "same:\n[]\n\n" 24 | "expected:\n[]\n\n" 25 | "actual:\n[UserWarning('foo')]\n" 26 | " (expected) " 27 | "!= [UserWarning('foo')] (actual)" 28 | ): 29 | with warnings.catch_warnings(record=True) as backstop: 30 | with ShouldNotWarn(): 31 | warnings.warn('foo') 32 | compare(len(backstop), expected=0) 33 | 34 | def test_no_warn_expected(self): 35 | with ShouldNotWarn(): 36 | pass 37 | 38 | def test_no_warn_not_expected(self): 39 | with ShouldAssert( 40 | "\n\n" 41 | "same:\n[]\n\n" 42 | "expected:\n[args: ('foo',)]" 43 | "\n\nactual:\n[]\n" 44 | " (expected) != [] (actual)" 45 | ): 46 | with ShouldWarn(UserWarning('foo')): 47 | pass 48 | 49 | def test_filters_removed(self): 50 | with warnings.catch_warnings(): 51 | warnings.simplefilter("ignore") 52 | with ShouldWarn(UserWarning("foo")): 53 | warnings.warn('foo') 54 | 55 | def test_multiple_warnings(self): 56 | with ShouldRaise(AssertionError) as s: 57 | with ShouldWarn(UserWarning('foo')): 58 | warnings.warn('foo') 59 | warnings.warn('bar') 60 | content = str(s.raised) 61 | self.assertTrue('foo' in content) 62 | self.assertTrue('bar' in content) 63 | 64 | def test_multiple_warnings_ordered(self): 65 | with warnings.catch_warnings(record=True) as backstop: 66 | with ShouldWarn(UserWarning('foo'), UserWarning('bar')): 67 | warnings.warn('foo') 68 | warnings.warn('bar') 69 | compare(len(backstop), expected=0) 70 | 71 | def test_multiple_warnings_wrong_order(self): 72 | with ShouldRaise(AssertionError) as s: 73 | with ShouldWarn(UserWarning('foo'), UserWarning('bar')): 74 | warnings.warn('bar') 75 | warnings.warn('foo') 76 | content = str(s.raised) 77 | self.assertTrue('foo' in content) 78 | self.assertTrue('bar' in content) 79 | 80 | def test_multiple_warnings_ignore_order(self): 81 | with warnings.catch_warnings(record=True) as backstop: 82 | with ShouldWarn(UserWarning('foo'), UserWarning('bar'), order_matters=False): 83 | warnings.warn('bar') 84 | warnings.warn('foo') 85 | 
compare(len(backstop), expected=0) 86 | 87 | def test_minimal_ok(self): 88 | with ShouldWarn(UserWarning): 89 | warnings.warn('foo') 90 | 91 | def test_minimal_bad(self): 92 | with ShouldAssert( 93 | "\n\n" 94 | "same:\n[]\n\n" 95 | "expected:\n" 96 | "[wrong type]\n\n" 97 | "actual:\n[UserWarning('foo')]\n" 98 | " (expected) " 99 | "!= [UserWarning('foo')] (actual)" 100 | ): 101 | with ShouldWarn(DeprecationWarning): 102 | warnings.warn('foo') 103 | 104 | def test_maximal_ok(self): 105 | with ShouldWarn(DeprecationWarning('foo')): 106 | warnings.warn_explicit( 107 | 'foo', DeprecationWarning, 'bar.py', 42, 'bar_module' 108 | ) 109 | 110 | def test_maximal_bad(self): 111 | with ShouldAssert( 112 | "\n\n" 113 | "same:\n[]\n\n" 114 | "expected:\n[\n" 115 | "\n" 116 | "attributes differ:\n" 117 | "'args': ('bar',) (Comparison) != ('foo',) (actual)\n" 118 | "]\n\n" 119 | "actual:\n[DeprecationWarning('foo')]\n" 120 | " (expected) " 121 | "!= [DeprecationWarning('foo')] (actual)" 122 | ): 123 | with ShouldWarn(DeprecationWarning('bar')): 124 | warnings.warn_explicit( 125 | 'foo', DeprecationWarning, 'bar.py', 42, 'bar_module' 126 | ) 127 | 128 | def test_maximal_explore(self): 129 | with ShouldWarn() as recorded: 130 | warnings.warn_explicit( 131 | 'foo', DeprecationWarning, 'bar.py', 42, 'bar_module' 132 | ) 133 | compare(len(recorded), expected=1) 134 | 135 | expected_attrs = dict( 136 | _category_name='DeprecationWarning', 137 | category=DeprecationWarning, 138 | file=None, 139 | filename='bar.py', 140 | line=None, 141 | lineno=42, 142 | message=C(DeprecationWarning('foo')), 143 | source=None 144 | ) 145 | 146 | compare(expected=C(warnings.WarningMessage, **expected_attrs), 147 | actual=recorded[0]) 148 | 149 | def test_filter_present(self): 150 | with ShouldWarn(DeprecationWarning, 151 | message="This function is deprecated."): 152 | warnings.warn("This utility is deprecated.", DeprecationWarning) 153 | warnings.warn("This function is deprecated.", DeprecationWarning) 154 | 155 | def test_filter_missing(self): 156 | with ShouldAssert( 157 | "\n\n" 158 | "same:\n[]\n\n" 159 | "expected:\n[]\n\n" 160 | "actual:\n[]\n" 161 | " (expected) != [] (actual)" 162 | ): 163 | with ShouldWarn(DeprecationWarning, 164 | message="This function is deprecated."): 165 | warnings.warn("This utility is deprecated.", DeprecationWarning) 166 | -------------------------------------------------------------------------------- /testfixtures/tests/test_wrap.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | 3 | from testfixtures.mock import Mock, MagicMock, patch, DEFAULT 4 | 5 | from testfixtures import wrap, compare, log_capture, LogCapture 6 | 7 | 8 | class TestWrap(TestCase): 9 | 10 | def test_wrapping(self): 11 | 12 | m = Mock() 13 | 14 | @wrap(m.before, m.after) 15 | def test_function(r): 16 | m.test() 17 | return 'something' 18 | 19 | compare(m.method_calls, []) 20 | compare(test_function(), 'something') 21 | compare(m.method_calls, [ 22 | ('before', (), {}), 23 | ('test', (), {}), 24 | ('after', (), {}) 25 | ]) 26 | 27 | def test_wrapping_only_before(self): 28 | 29 | before = Mock() 30 | 31 | @wrap(before) 32 | def test_function(): 33 | return 'something' 34 | 35 | self.assertFalse(before.called) 36 | compare(test_function(), 'something') 37 | compare(before.call_count, 1) 38 | 39 | def test_wrapping_wants_return(self): 40 | 41 | m = Mock() 42 | m.before.return_value = 'something' 43 | 44 | @wrap(m.before, m.after) 45 | def 
test_function(r): 46 | m.test(r) 47 | return 'r:'+r 48 | 49 | compare(m.method_calls, []) 50 | compare(test_function(), 'r:something') 51 | compare(m.method_calls, [ 52 | ('before', (), {}), 53 | ('test', ('something', ), {}), 54 | ('after', (), {}) 55 | ]) 56 | 57 | def test_wrapping_wants_arguments(self): 58 | 59 | # This only works in python 2.5+, for 60 | # earlier versions, you'll have to come 61 | # up with your own `partial` class... 62 | from functools import partial 63 | 64 | m = Mock() 65 | 66 | @wrap(partial(m.before, 1, x=2), partial(m.after, 3, y=4)) 67 | def test_function(r): 68 | m.test() 69 | return 'something' 70 | 71 | compare(m.method_calls, []) 72 | compare(test_function(), 'something') 73 | compare(m.method_calls, [ 74 | ('before', (1, ), {'x': 2}), 75 | ('test', (), {}), 76 | ('after', (3, ), {'y': 4}) 77 | ]) 78 | 79 | def test_multiple_wrappers(self): 80 | 81 | m = Mock() 82 | 83 | @wrap(m.before2, m.after2) 84 | @wrap(m.before1, m.after1) 85 | def test_function(): 86 | m.test_function() 87 | return 'something' 88 | 89 | compare(m.method_calls, []) 90 | compare(test_function(), 'something') 91 | compare(m.method_calls, [ 92 | ('before1', (), {}), 93 | ('before2', (), {}), 94 | ('test_function', (), {}), 95 | ('after2', (), {}), 96 | ('after1', (), {}), 97 | ]) 98 | 99 | def test_multiple_wrappers_wants_return(self): 100 | 101 | m = Mock() 102 | m.before1.return_value = 1 103 | m.before2.return_value = 2 104 | 105 | @wrap(m.before2, m.after2) 106 | @wrap(m.before1, m.after1) 107 | def test_function(r1, r2): 108 | m.test_function(r1, r2) 109 | return 'something' 110 | 111 | compare(m.method_calls, []) 112 | compare(test_function(), 'something') 113 | compare(m.method_calls, [ 114 | ('before1', (), {}), 115 | ('before2', (), {}), 116 | ('test_function', (1, 2), {}), 117 | ('after2', (), {}), 118 | ('after1', (), {}), 119 | ]) 120 | 121 | def test_multiple_wrappers_only_want_first_return(self): 122 | 123 | m = Mock() 124 | m.before1.return_value = 1 125 | 126 | @wrap(m.before2, m.after2) 127 | @wrap(m.before1, m.after1) 128 | def test_function(r1): 129 | m.test_function(r1) 130 | return 'something' 131 | 132 | compare(m.method_calls, []) 133 | compare(test_function(), 'something') 134 | compare(m.method_calls, [ 135 | ('before1', (), {}), 136 | ('before2', (), {}), 137 | ('test_function', (1, ), {}), 138 | ('after2', (), {}), 139 | ('after1', (), {}), 140 | ]) 141 | 142 | def test_wrap_method(self): 143 | 144 | m = Mock() 145 | 146 | class T: 147 | @wrap(m.before, m.after) 148 | def method(self): 149 | m.method() 150 | 151 | T().method() 152 | 153 | compare(m.method_calls, [ 154 | ('before', (), {}), 155 | ('method', (), {}), 156 | ('after', (), {}) 157 | ]) 158 | 159 | def test_wrap_method_wants_return(self): 160 | 161 | m = Mock() 162 | m.before.return_value = 'return' 163 | 164 | class T: 165 | @wrap(m.before, m.after) 166 | def method(self, r): 167 | m.method(r) 168 | 169 | T().method() 170 | 171 | compare(m.method_calls, [ 172 | ('before', (), {}), 173 | ('method', ('return', ), {}), 174 | ('after', (), {}) 175 | ]) 176 | 177 | def test_wrapping_different_functions(self): 178 | 179 | m = Mock() 180 | 181 | @wrap(m.before1, m.after1) 182 | def test_function1(): 183 | m.something1() 184 | return 'something1' 185 | 186 | @wrap(m.before2, m.after2) 187 | def test_function2(): 188 | m.something2() 189 | return 'something2' 190 | 191 | compare(m.method_calls, []) 192 | compare(test_function1(), 'something1') 193 | compare(m.method_calls, [ 194 | ('before1', (), {}), 195 
| ('something1', (), {}), 196 | ('after1', (), {}) 197 | ]) 198 | compare(test_function2(), 'something2') 199 | compare(m.method_calls, [ 200 | ('before1', (), {}), 201 | ('something1', (), {}), 202 | ('after1', (), {}), 203 | ('before2', (), {}), 204 | ('something2', (), {}), 205 | ('after2', (), {}) 206 | ]) 207 | 208 | def test_wrapping_local_vars(self): 209 | 210 | m = Mock() 211 | 212 | @wrap(m.before, m.after) 213 | def test_function(): 214 | something = 1 215 | m.test() 216 | return 'something' 217 | 218 | compare(m.method_calls, []) 219 | compare(test_function(), 'something') 220 | compare(m.method_calls, [ 221 | ('before', (), {}), 222 | ('test', (), {}), 223 | ('after', (), {}) 224 | ]) 225 | 226 | def test_wrapping__name__(self): 227 | 228 | m = Mock() 229 | 230 | @wrap(m.before, m.after) 231 | def test_function(): 232 | pass # pragma: no cover 233 | 234 | compare(test_function.__name__, 'test_function') 235 | 236 | def test_our_wrap_dealing_with_mock_patch(self): 237 | 238 | @patch.multiple('testfixtures.tests.sample1', X=DEFAULT) 239 | @log_capture() 240 | def patched(log, X): 241 | from testfixtures.tests.sample1 import X as imported_X 242 | assert isinstance(log, LogCapture) 243 | assert isinstance(X, MagicMock) 244 | assert imported_X is X 245 | 246 | patched() 247 | 248 | def test_patch_with_dict(self): 249 | @patch('testfixtures.tests.sample1.X', {'x': 1}) 250 | @log_capture() 251 | def patched(log): 252 | assert isinstance(log, LogCapture) 253 | from testfixtures.tests.sample1 import X 254 | assert X == {'x': 1} 255 | 256 | patched() 257 | -------------------------------------------------------------------------------- /docs/popen.txt: -------------------------------------------------------------------------------- 1 | 2 | .. currentmodule:: testfixtures.popen 3 | 4 | Testing subprocesses 5 | ==================== 6 | 7 | When using the :mod:`subprocess` package there are two approaches to testing: 8 | 9 | * Have your tests exercise the real processes being instantiated and used. 10 | 11 | * Mock out use of the :mod:`subprocess` package and provide expected output 12 | while recording interactions with the package to make sure they are as 13 | expected. 14 | 15 | While the first of these should be preferred, it means that you need to have all 16 | the external software available everywhere you wish to run tests. Your tests 17 | will also need to make sure any dependencies of that software on 18 | an external environment are met. If that external software takes a long time to 19 | run, your tests will also take a long time to run. 20 | 21 | These challenges can often make the second approach more practical and can 22 | be the more pragmatic approach when coupled with a mock that accurately 23 | simulates the behaviour of a subprocess. :class:`~testfixtures.popen.MockPopen` 24 | is an attempt to provide just such a mock. 25 | 26 | .. warning:: 27 | Previous versions of this mock made use of :attr:`~unittest.mock.Mock.mock_calls`. 28 | These are deceptively incapable of recording some information important in the use 29 | of this mock, so please switch to making assertions about 30 | :attr:`~MockPopen.all_calls` and :attr:`~MockPopenInstance.calls` instead. 31 | 32 | Example usage 33 | ------------- 34 | 35 | As an example, suppose you have code such as the following that you need to 36 | test: 37 | 38 | .. 
literalinclude:: ../testfixtures/tests/test_popen_docs.py 39 | :lines: 4-12 40 | 41 | Tests that exercise this code using :class:`~testfixtures.popen.MockPopen` 42 | could be written as follows: 43 | 44 | .. literalinclude:: ../testfixtures/tests/test_popen_docs.py 45 | :lines: 16-52 46 | 47 | 48 | Passing input to processes 49 | -------------------------- 50 | 51 | If your testing requires passing input to the subprocess, you can do so by 52 | checking for the input passed to the :meth:`~subprocess.Popen.communicate` method 53 | when you check the calls on the mock as shown in this example: 54 | 55 | .. literalinclude:: ../testfixtures/tests/test_popen_docs.py 56 | :pyobject: TestMyFunc.test_communicate_with_input 57 | :dedent: 4 58 | 59 | .. note:: Accessing ``.stdin`` isn't currently supported by this mock. 60 | 61 | 62 | Reading from ``stdout`` and ``stderr`` 63 | -------------------------------------- 64 | 65 | The :attr:`~MockPopenInstance.stdout` and :attr:`~MockPopenInstance.stderr` 66 | attributes of the mock returned by 67 | :class:`~testfixtures.popen.MockPopen` will be file-like objects as with 68 | the real :class:`~subprocess.Popen` and can be read as shown in this example: 69 | 70 | .. literalinclude:: ../testfixtures/tests/test_popen_docs.py 71 | :pyobject: TestMyFunc.test_read_from_stdout_and_stderr 72 | :dedent: 4 73 | 74 | .. warning:: 75 | 76 | While these streams behave a lot like the streams of a real 77 | :class:`~subprocess.Popen` object, they do not exhibit the deadlocking 78 | behaviour that can occur when the two streams are read as in the example 79 | above. Be very careful when reading :attr:`~MockPopenInstance.stdout` and 80 | :attr:`~MockPopenInstance.stderr` and 81 | consider using :meth:`~subprocess.Popen.communicate` instead. 82 | 83 | 84 | Writing to ``stdin`` 85 | -------------------- 86 | If you set ``stdin=PIPE`` in your call to :class:`~subprocess.Popen` then the 87 | :attr:`~MockPopenInstance.stdin` 88 | attribute of the mock returned by :class:`~testfixtures.popen.MockPopen` 89 | will be a mock and you can then examine the write calls to it as shown 90 | in this example: 91 | 92 | .. literalinclude:: ../testfixtures/tests/test_popen_docs.py 93 | :pyobject: TestMyFunc.test_write_to_stdin 94 | :dedent: 4 95 | 96 | 97 | Specifying the return code 98 | -------------------------- 99 | 100 | Often code will need to behave differently depending on the return code of the 101 | launched process. Specifying a simulated response code, along with testing for 102 | the correct usage of :meth:`~subprocess.Popen.wait`, can be seen in the 103 | following example: 104 | 105 | .. literalinclude:: ../testfixtures/tests/test_popen_docs.py 106 | :pyobject: TestMyFunc.test_wait_and_return_code 107 | :dedent: 4 108 | 109 | Checking for signal sending 110 | --------------------------- 111 | 112 | Calls to :meth:`~MockPopenInstance.send_signal`, 113 | :meth:`~MockPopenInstance.terminate` and :meth:`~MockPopenInstance.kill` are all 114 | recorded by the mock returned by :class:`~testfixtures.popen.MockPopen`. 115 | However, other than being recorded, these calls do nothing. 116 | The following example doesn't make sense for a real test of sub-process usage but 117 | does show how the mock behaves: 118 | 119 | .. 
literalinclude:: ../testfixtures/tests/test_popen_docs.py 120 | :pyobject: TestMyFunc.test_send_signal 121 | :dedent: 4 122 | 123 | Polling a process 124 | ----------------- 125 | 126 | The :meth:`~subprocess.Popen.poll` method is often used as part of a loop 127 | in order to do other work while waiting for a sub-process to complete. 128 | The mock returned by :class:`~testfixtures.popen.MockPopen` supports this 129 | by allowing the :meth:`~MockPopenInstance.poll` method to 130 | be called a number of times before 131 | the :attr:`~MockPopenInstance.returncode` is set using the 132 | ``poll_count`` parameter as shown in 133 | the following example: 134 | 135 | .. literalinclude:: ../testfixtures/tests/test_popen_docs.py 136 | :pyobject: TestMyFunc.test_poll_until_result 137 | :dedent: 4 138 | 139 | Different behaviour on sequential processes 140 | ------------------------------------------- 141 | 142 | If your code needs to call the same command but have different behaviour 143 | on each call, then you can pass a callable behaviour like this: 144 | 145 | .. literalinclude:: ../testfixtures/tests/test_popen_docs.py 146 | :pyobject: TestMyFunc.test_multiple_responses 147 | :dedent: 4 148 | 149 | If you need to keep state across calls, such as accumulating 150 | :attr:`~MockPopenInstance.stdin` or 151 | failing for a configurable number of calls, then wrap that behaviour up 152 | into a class: 153 | 154 | .. literalinclude:: ../testfixtures/tests/test_popen_docs.py 155 | :pyobject: CustomBehaviour 156 | 157 | This can then be used like this: 158 | 159 | .. literalinclude:: ../testfixtures/tests/test_popen_docs.py 160 | :pyobject: TestMyFunc.test_count_down 161 | :dedent: 4 162 | 163 | 164 | Using default behaviour 165 | ----------------------- 166 | 167 | If you're testing something that needs to make many calls to many different 168 | commands that all behave the same, it can be tedious to specify the behaviour 169 | of each with :class:`~MockPopen.set_command`. For this case, :class:`~MockPopen` 170 | has the :class:`~MockPopen.set_default` method which can be used to set the 171 | behaviour of any command that has not been specified with 172 | :class:`~MockPopen.set_command` as shown in the 173 | following example: 174 | 175 | .. literalinclude:: ../testfixtures/tests/test_popen_docs.py 176 | :pyobject: TestMyFunc.test_default_behaviour 177 | :dedent: 4 178 | 179 | 180 | Tracking multiple simultaneous processes 181 | ---------------------------------------- 182 | 183 | Conversely, if you're testing something that spins up multiple subprocesses 184 | and manages their simultaneous execution, you will want to explicitly define the 185 | behaviour of each process using :class:`~MockPopen.set_command` and then make 186 | assertions about each process using :attr:`~MockPopen.all_calls`. 187 | 188 | For example, suppose we wanted to test this function: 189 | 190 | .. literalinclude:: ../testfixtures/tests/test_popen_docs.py 191 | :pyobject: process_in_batches 192 | 193 | Then you could test it as follows: 194 | 195 | .. literalinclude:: ../testfixtures/tests/test_popen_docs.py 196 | :pyobject: TestMyFunc.test_multiple_processes 197 | :dedent: 4 198 | 199 | Note that the order of all calls is explicitly recorded. If the order of these calls 200 | is non-deterministic due to your method of process management, you may wish to use a 201 | :class:`~testfixtures.SequenceComparison`: 202 | 203 | .. 
literalinclude:: ../testfixtures/tests/test_popen_docs.py 204 | :pyobject: TestMyFunc.test_multiple_processes_unordered 205 | :dedent: 4 206 | -------------------------------------------------------------------------------- /testfixtures/tests/test_log_capture.py: -------------------------------------------------------------------------------- 1 | from logging import getLogger, ERROR 2 | from unittest import TestCase 3 | 4 | from testfixtures.shouldraise import ShouldAssert 5 | from testfixtures.mock import patch 6 | 7 | from testfixtures import ( 8 | log_capture, compare, Comparison as C, LogCapture 9 | ) 10 | 11 | root = getLogger() 12 | one = getLogger('one') 13 | two = getLogger('two') 14 | child = getLogger('one.child') 15 | 16 | 17 | class TestLog_Capture(TestCase): 18 | 19 | @log_capture('two', 'one.child') 20 | @log_capture('one') 21 | @log_capture() 22 | def test_logging(self, l1, l2, l3): 23 | # we can now log as normal 24 | root.info('1') 25 | one.info('2') 26 | two.info('3') 27 | child.info('4') 28 | # and later check what was logged 29 | l1.check( 30 | ('root', 'INFO', '1'), 31 | ('one', 'INFO', '2'), 32 | ('two', 'INFO', '3'), 33 | ('one.child', 'INFO', '4'), 34 | ) 35 | l2.check( 36 | ('one', 'INFO', '2'), 37 | ('one.child', 'INFO', '4') 38 | ) 39 | l3.check( 40 | ('two', 'INFO', '3'), 41 | ('one.child', 'INFO', '4') 42 | ) 43 | # each logger also exposes the real 44 | # log records should anything else be neeeded 45 | compare(l3.records, [ 46 | C('logging.LogRecord'), 47 | C('logging.LogRecord'), 48 | ]) 49 | 50 | @log_capture(ensure_checks_above=ERROR) 51 | def test_simple_strict(self, l): 52 | root.error('during') 53 | l.check(("root", "ERROR", "during")) 54 | 55 | def test_uninstall_properly(self): 56 | root = getLogger() 57 | child = getLogger('child') 58 | before_root = root.handlers[:] 59 | before_child = child.handlers[:] 60 | try: 61 | old_root_level = root.level 62 | root.setLevel(49) 63 | old_child_level = child.level 64 | child.setLevel(69) 65 | 66 | @log_capture('child') 67 | @log_capture() 68 | def test_method(l1, l2): 69 | root = getLogger() 70 | root.info('1') 71 | self.assertEqual(root.level, 1) 72 | child = getLogger('child') 73 | self.assertEqual(child.level, 1) 74 | child.info('2') 75 | l1.check( 76 | ('root', 'INFO', '1'), 77 | ('child', 'INFO', '2'), 78 | ) 79 | l2.check( 80 | ('child', 'INFO', '2'), 81 | ) 82 | 83 | test_method() 84 | 85 | self.assertEqual(root.level, 49) 86 | self.assertEqual(child.level, 69) 87 | 88 | self.assertEqual(root.handlers, before_root) 89 | self.assertEqual(child.handlers, before_child) 90 | 91 | finally: 92 | root.setLevel(old_root_level) 93 | child.setLevel(old_child_level) 94 | 95 | @log_capture() 96 | def test_decorator_returns_logcapture(self, l): 97 | # check for what we get, so we only have to write 98 | # tests in test_logcapture.py 99 | self.assertTrue(isinstance(l, LogCapture)) 100 | 101 | def test_remove_existing_handlers(self): 102 | logger = getLogger() 103 | # get original handlers 104 | original = logger.handlers 105 | try: 106 | # put in a stub which will blow up if used 107 | logger.handlers = start = [object()] 108 | 109 | @log_capture() 110 | def test_method(l): 111 | logger.info('during') 112 | l.check(('root', 'INFO', 'during')) 113 | 114 | test_method() 115 | 116 | compare(logger.handlers, start) 117 | 118 | finally: 119 | logger.handlers = original 120 | 121 | def test_clear_global_state(self): 122 | from logging import _handlers, _handlerList 123 | capture = LogCapture() 124 | capture.uninstall() 125 
| self.assertFalse(capture in _handlers) 126 | self.assertFalse(capture in _handlerList) 127 | 128 | def test_no_propogate(self): 129 | logger = getLogger('child') 130 | # paranoid check 131 | compare(logger.propagate, True) 132 | 133 | @log_capture('child', propagate=False) 134 | def test_method(l): 135 | logger.info('a log message') 136 | l.check(('child', 'INFO', 'a log message')) 137 | 138 | with LogCapture() as global_log: 139 | test_method() 140 | 141 | global_log.check() 142 | compare(logger.propagate, True) 143 | 144 | def test_different_attributes(self): 145 | with LogCapture(attributes=('funcName', 'processName')) as log: 146 | getLogger().info('oh hai') 147 | log.check( 148 | ('test_different_attributes', 'MainProcess') 149 | ) 150 | 151 | def test_missing_attribute(self): 152 | with LogCapture(attributes=('msg', 'lolwut')) as log: 153 | getLogger().info('oh %s', 'hai') 154 | log.check( 155 | ('oh %s', None) 156 | ) 157 | 158 | def test_single_attribute(self): 159 | # one which isn't a string, to boot! 160 | with LogCapture(attributes=['msg']) as log: 161 | getLogger().info(dict(foo='bar', baz='bob')) 162 | log.check( 163 | dict(foo='bar', baz='bob'), 164 | ) 165 | 166 | def test_callable_instead_of_attribute(self): 167 | def extract_msg(record): 168 | return {k: v for (k, v) in record.msg.items() 169 | if k != 'baz'} 170 | with LogCapture(attributes=extract_msg) as log: 171 | getLogger().info(dict(foo='bar', baz='bob')) 172 | log.check( 173 | dict(foo='bar'), 174 | ) 175 | 176 | def test_msg_is_none(self): 177 | with LogCapture(attributes=('msg', 'foo')) as log: 178 | getLogger().info(None, extra=dict(foo='bar')) 179 | log.check( 180 | (None, 'bar') 181 | ) 182 | 183 | def test_normal_check(self): 184 | with LogCapture() as log: 185 | getLogger().info('oh hai') 186 | 187 | with ShouldAssert( 188 | "sequence not as expected:\n\n" 189 | "same:\n" 190 | "()\n\n" 191 | "expected:\n" 192 | "(('root', 'INFO', 'oh noez'),)\n\n" 193 | "actual:\n" 194 | "(('root', 'INFO', 'oh hai'),)" 195 | ): 196 | log.check(('root', 'INFO', 'oh noez')) 197 | 198 | def test_recursive_check(self): 199 | 200 | with LogCapture(recursive_check=True) as log: 201 | getLogger().info('oh hai') 202 | 203 | with ShouldAssert( 204 | "sequence not as expected:\n\n" 205 | "same:\n()\n\n" 206 | "expected:\n(('root', 'INFO', 'oh noez'),)\n\n" 207 | "actual:\n(('root', 'INFO', 'oh hai'),)\n\n" 208 | "While comparing [0]: sequence not as expected:\n\n" 209 | "same:\n('root', 'INFO')\n\n" 210 | "expected:\n" 211 | "('oh noez',)\n\n" 212 | "actual:\n" 213 | "('oh hai',)\n\n" 214 | "While comparing [0][2]: 'oh noez' (expected) != 'oh hai' (actual)" 215 | ): 216 | log.check(('root', 'INFO', 'oh noez')) 217 | 218 | @log_capture() 219 | @patch('testfixtures.tests.sample1.SampleClassA') 220 | def test_patch_then_log(self, a1, a2): 221 | actual = [type(c).__name__ for c in (a1, a2)] 222 | compare(actual, expected=['MagicMock', 'LogCaptureForDecorator']) 223 | 224 | @patch('testfixtures.tests.sample1.SampleClassA') 225 | @log_capture() 226 | def test_log_then_patch(self, a1, a2): 227 | actual = [type(c).__name__ for c in (a1, a2)] 228 | compare(actual, expected=['LogCaptureForDecorator', 'MagicMock']) 229 | 230 | 231 | class BaseCaptureTest(TestCase): 232 | a = 33 233 | 234 | @log_capture() 235 | def test_logs_if_a_smaller_than_44(self, logs): 236 | logger = getLogger() 237 | if self.a < 44: 238 | logger.info('{} is smaller than 44'.format(self.a)) 239 | 240 | logs.check( 241 | ('root', 'INFO', '{} is smaller than 
44'.format(self.a)), 242 | ) 243 | 244 | 245 | class SubclassCaptureTest(BaseCaptureTest): 246 | a = 2 247 | -------------------------------------------------------------------------------- /testfixtures/tests/test_popen_docs.py: -------------------------------------------------------------------------------- 1 | # NB: This file is used in the documentation, if you make changes, ensure 2 | # you update the line numbers in popen.txt! 3 | 4 | from subprocess import Popen, PIPE 5 | 6 | 7 | def my_func(): 8 | process = Popen(['svn', 'ls', '-R', 'foo'], stdout=PIPE, stderr=PIPE) 9 | out, err = process.communicate() 10 | if process.returncode: 11 | raise RuntimeError('something bad happened') 12 | return out 13 | 14 | dotted_path = 'testfixtures.tests.test_popen_docs.Popen' 15 | 16 | from unittest import TestCase 17 | 18 | from testfixtures.mock import call 19 | from testfixtures import Replacer, ShouldRaise, compare, SequenceComparison 20 | from testfixtures.popen import MockPopen, PopenBehaviour 21 | 22 | 23 | class TestMyFunc(TestCase): 24 | 25 | def setUp(self): 26 | self.Popen = MockPopen() 27 | self.r = Replacer() 28 | self.r.replace(dotted_path, self.Popen) 29 | self.addCleanup(self.r.restore) 30 | 31 | def test_example(self): 32 | # set up 33 | self.Popen.set_command('svn ls -R foo', stdout=b'o', stderr=b'e') 34 | 35 | # testing of results 36 | compare(my_func(), b'o') 37 | 38 | # testing calls were in the right order and with the correct parameters: 39 | process = call.Popen(['svn', 'ls', '-R', 'foo'], stderr=PIPE, stdout=PIPE) 40 | compare(Popen.all_calls, expected=[ 41 | process, 42 | process.communicate() 43 | ]) 44 | 45 | def test_example_bad_returncode(self): 46 | # set up 47 | Popen.set_command('svn ls -R foo', stdout=b'o', stderr=b'e', 48 | returncode=1) 49 | 50 | # testing of error 51 | with ShouldRaise(RuntimeError('something bad happened')): 52 | my_func() 53 | 54 | def test_communicate_with_input(self): 55 | # setup 56 | Popen = MockPopen() 57 | Popen.set_command('a command') 58 | # usage 59 | process = Popen('a command', stdout=PIPE, stderr=PIPE, shell=True) 60 | out, err = process.communicate('foo') 61 | # test call list 62 | compare(Popen.all_calls, expected=[ 63 | process.root_call, 64 | process.root_call.communicate('foo'), 65 | ]) 66 | 67 | def test_read_from_stdout_and_stderr(self): 68 | # setup 69 | Popen = MockPopen() 70 | Popen.set_command('a command', stdout=b'foo', stderr=b'bar') 71 | # usage 72 | process = Popen('a command', stdout=PIPE, stderr=PIPE, shell=True) 73 | compare(process.stdout.read(), expected=b'foo') 74 | compare(process.stderr.read(), expected=b'bar') 75 | 76 | def test_write_to_stdin(self): 77 | # setup 78 | Popen = MockPopen() 79 | Popen.set_command('a command') 80 | # usage 81 | process = Popen('a command', stdin=PIPE, shell=True) 82 | process.stdin.write('some text') 83 | process.stdin.close() 84 | # test call list 85 | compare(Popen.all_calls, expected=[ 86 | process.root_call, 87 | process.root_call.stdin.write('some text'), 88 | process.root_call.stdin.close(), 89 | ]) 90 | 91 | def test_wait_and_return_code(self): 92 | # setup 93 | Popen = MockPopen() 94 | Popen.set_command('a command', returncode=3) 95 | # usage 96 | process = Popen('a command') 97 | compare(process.returncode, expected=None) 98 | # result checking 99 | compare(process.wait(), expected=3) 100 | compare(process.returncode, expected=3) 101 | # test call list 102 | compare(Popen.all_calls, expected=[ 103 | call.Popen('a command'), 104 | call.Popen('a command').wait(), 105 | 
]) 106 | 107 | def test_send_signal(self): 108 | # setup 109 | Popen = MockPopen() 110 | Popen.set_command('a command') 111 | # usage 112 | process = Popen('a command', stdout=PIPE, stderr=PIPE, shell=True) 113 | process.send_signal(0) 114 | # result checking 115 | compare(Popen.all_calls, expected=[ 116 | process.root_call, 117 | process.root_call.send_signal(0), 118 | ]) 119 | 120 | def test_poll_until_result(self): 121 | # setup 122 | Popen = MockPopen() 123 | Popen.set_command('a command', returncode=3, poll_count=2) 124 | # example usage 125 | process = Popen('a command') 126 | while process.poll() is None: 127 | # you'd probably have a sleep here, or go off and 128 | # do some other work. 129 | pass 130 | # result checking 131 | compare(process.returncode, expected=3) 132 | compare(Popen.all_calls, expected=[ 133 | process.root_call, 134 | process.root_call.poll(), 135 | process.root_call.poll(), 136 | process.root_call.poll(), 137 | ]) 138 | 139 | def test_default_behaviour(self): 140 | # set up 141 | self.Popen.set_default(stdout=b'o', stderr=b'e') 142 | 143 | # testing of results 144 | compare(my_func(), b'o') 145 | 146 | # testing calls were in the right order and with the correct parameters: 147 | root_call = call.Popen(['svn', 'ls', '-R', 'foo'], 148 | stderr=PIPE, stdout=PIPE) 149 | compare(Popen.all_calls, expected=[ 150 | root_call, 151 | root_call.communicate() 152 | ]) 153 | 154 | def test_multiple_responses(self): 155 | # set up 156 | behaviours = [ 157 | PopenBehaviour(stderr=b'e', returncode=1), 158 | PopenBehaviour(stdout=b'o'), 159 | ] 160 | 161 | def behaviour(command, stdin): 162 | return behaviours.pop(0) 163 | 164 | self.Popen.set_command('svn ls -R foo', behaviour=behaviour) 165 | 166 | # testing of error: 167 | with ShouldRaise(RuntimeError('something bad happened')): 168 | my_func() 169 | # testing of second call: 170 | compare(my_func(), b'o') 171 | 172 | def test_count_down(self): 173 | # set up 174 | self.Popen.set_command('svn ls -R foo', behaviour=CustomBehaviour()) 175 | # testing of error: 176 | with ShouldRaise(RuntimeError('something bad happened')): 177 | my_func() 178 | # testing of second call: 179 | compare(my_func(), b'o') 180 | 181 | def test_multiple_processes(self): 182 | # set up 183 | self.Popen.set_command('process --batch=0', stdout=b'42') 184 | self.Popen.set_command('process --batch=1', stdout=b'13') 185 | 186 | # testing of results 187 | compare(process_in_batches(2), expected=55) 188 | 189 | # testing of process management: 190 | p1 = call.Popen('process --batch=0', shell=True, stderr=PIPE, stdout=PIPE) 191 | p2 = call.Popen('process --batch=1', shell=True, stderr=PIPE, stdout=PIPE) 192 | compare(Popen.all_calls, expected=[ 193 | p1, 194 | p2, 195 | p1.communicate(), 196 | p2.communicate(), 197 | ]) 198 | 199 | def test_multiple_processes_unordered(self): 200 | # set up 201 | self.Popen.set_command('process --batch=0', stdout=b'42') 202 | self.Popen.set_command('process --batch=1', stdout=b'13') 203 | 204 | # testing of results 205 | compare(process_in_batches(2), expected=55) 206 | 207 | # testing of process management: 208 | p1 = call.Popen('process --batch=0', shell=True, stderr=PIPE, stdout=PIPE) 209 | p2 = call.Popen('process --batch=1', shell=True, stderr=PIPE, stdout=PIPE) 210 | compare(Popen.all_calls, expected=SequenceComparison( 211 | p2, 212 | p2.communicate(), 213 | p1, 214 | p1.communicate(), 215 | ordered=False 216 | )) 217 | 218 | 219 | class CustomBehaviour: 220 | 221 | def __init__(self, fail_count=1): 222 | 
self.fail_count = fail_count 223 | 224 | def __call__(self, command, stdin): 225 | while self.fail_count > 0: 226 | self.fail_count -= 1 227 | return PopenBehaviour(stderr=b'e', returncode=1) 228 | return PopenBehaviour(stdout=b'o') 229 | 230 | 231 | def process_in_batches(n): 232 | processes = [] 233 | for i in range(n): 234 | processes.append(Popen('process --batch='+str(i), 235 | stdout=PIPE, stderr=PIPE, shell=True)) 236 | total = 0 237 | for process in processes: 238 | out, err = process.communicate() 239 | total += int(out) 240 | return total 241 | -------------------------------------------------------------------------------- /testfixtures/tests/test_time.py: -------------------------------------------------------------------------------- 1 | from datetime import timedelta 2 | from unittest import TestCase 3 | 4 | from testfixtures import mock_time, replace, compare, ShouldRaise 5 | from testfixtures.datetime import MockTime 6 | from .test_datetime import SampleTZInfo 7 | 8 | 9 | class TestTime(TestCase): 10 | 11 | @replace('time.time', mock_time()) 12 | def test_time_call(self) -> None: 13 | from time import time 14 | compare(time(), 978307200.0) 15 | compare(time(), 978307201.0) 16 | compare(time(), 978307203.0) 17 | 18 | @replace('time.time', mock_time(2002, 1, 1, 1, 2, 3)) 19 | def test_time_supplied(self) -> None: 20 | from time import time 21 | compare(time(), 1009846923.0) 22 | 23 | @replace('time.time', mock_time(None)) 24 | def test_time_sequence(self, t: MockTime) -> None: 25 | t.add(2002, 1, 1, 1, 0, 0) 26 | t.add(2002, 1, 1, 2, 0, 0) 27 | t.add(2002, 1, 1, 3, 0, 0) 28 | from time import time 29 | compare(time(), 1009846800.0) 30 | compare(time(), 1009850400.0) 31 | compare(time(), 1009854000.0) 32 | 33 | @replace('time.time', mock_time(None)) 34 | def test_add_datetime_supplied(self, t: MockTime) -> None: 35 | from datetime import datetime 36 | from time import time 37 | t.add(datetime(2002, 1, 1, 2)) 38 | compare(time(), 1009850400.0) 39 | tzinfo = SampleTZInfo() 40 | tzrepr = repr(tzinfo) 41 | with ShouldRaise(ValueError( 42 | 'Cannot add datetime with tzinfo of %s as configured to use None' %( 43 | tzrepr 44 | ))): 45 | t.add(datetime(2001, 1, 1, tzinfo=tzinfo)) 46 | 47 | def test_instantiate_with_datetime(self) -> None: 48 | from datetime import datetime 49 | t = mock_time(datetime(2002, 1, 1, 2)) 50 | compare(t(), 1009850400.0) 51 | 52 | @replace('time.time', mock_time(None)) 53 | def test_now_requested_longer_than_supplied(self, t: MockTime) -> None: 54 | t.add(2002, 1, 1, 1, 0, 0) 55 | t.add(2002, 1, 1, 2, 0, 0) 56 | from time import time 57 | compare(time(), 1009846800.0) 58 | compare(time(), 1009850400.0) 59 | compare(time(), 1009850401.0) 60 | compare(time(), 1009850403.0) 61 | 62 | @replace('time.time', mock_time()) 63 | def test_call(self, t: MockTime) -> None: 64 | compare(t(), 978307200.0) 65 | from time import time 66 | compare(time(), 978307201.0) 67 | 68 | @replace('time.time', mock_time()) 69 | def test_repr_time(self) -> None: 70 | from time import time 71 | assert repr(time).startswith('<testfixtures.datetime.MockTime object at ') 72 | 73 | @replace('time.time', mock_time(delta=10)) 74 | def test_delta(self) -> None: 75 | from time import time 76 | compare(time(), 978307200.0) 77 | compare(time(), 978307210.0) 78 | compare(time(), 978307220.0) 79 | 80 | @replace('time.time', mock_time(delta_type='minutes')) 81 | def test_delta_type(self) -> None: 82 | from time import time 83 | compare(time(), 978307200.0) 84 | compare(time(), 978307260.0) 85 | compare(time(), 978307380.0) 86 | 87 | @replace('time.time', mock_time(None)) 88 | def test_set(self, time_mock: MockTime) -> None:
89 | from time import time 90 | time_mock.set(2001, 1, 1, 1, 0, 1) 91 | compare(time(), 978310801.0) 92 | time_mock.set(2002, 1, 1, 1, 0, 0) 93 | compare(time(), 1009846800.0) 94 | compare(time(), 1009846802.0) 95 | 96 | @replace('time.time', mock_time(None)) 97 | def test_set_datetime_supplied(self, t: MockTime) -> None: 98 | from datetime import datetime 99 | from time import time 100 | t.set(datetime(2001, 1, 1, 1, 0, 1)) 101 | compare(time(), 978310801.0) 102 | tzinfo = SampleTZInfo() 103 | tzrepr = repr(tzinfo) 104 | with ShouldRaise(ValueError( 105 | 'Cannot add datetime with tzinfo of %s as configured to use None' %( 106 | tzrepr 107 | ))): 108 | t.set(datetime(2001, 1, 1, tzinfo=tzinfo)) 109 | 110 | @replace('time.time', mock_time(None)) 111 | def test_set_kw(self, time_mock: MockTime) -> None: 112 | from time import time 113 | time_mock.set(year=2001, month=1, day=1, hour=1, second=1) 114 | compare(time(), 978310801.0) 115 | 116 | @replace('time.time', mock_time(None)) 117 | def test_set_kw_tzinfo(self, time_mock: MockTime) -> None: 118 | with ShouldRaise(TypeError('Cannot add using tzinfo on MockTime')): 119 | time_mock.set(year=2001, tzinfo=SampleTZInfo()) 120 | 121 | @replace('time.time', mock_time(None)) 122 | def test_set_args_tzinfo(self, time_mock: MockTime) -> None: 123 | with ShouldRaise(TypeError('Cannot add using tzinfo on MockTime')): 124 | time_mock.set(2002, 1, 2, 3, 4, 5, 6, SampleTZInfo()) # type: ignore[arg-type] 125 | 126 | @replace('time.time', mock_time(None)) 127 | def test_add_kw(self, time_mock: MockTime) -> None: 128 | from time import time 129 | time_mock.add(year=2001, month=1, day=1, hour=1, second=1) 130 | compare(time(), 978310801.0) 131 | 132 | @replace('time.time', mock_time(None)) 133 | def test_add_tzinfo_kw(self, time_mock: MockTime) -> None: 134 | with ShouldRaise(TypeError('Cannot add using tzinfo on MockTime')): 135 | time_mock.add(year=2001, tzinfo=SampleTZInfo()) 136 | 137 | @replace('time.time', mock_time(None)) 138 | def test_add_tzinfo_args(self, time_mock: MockTime) -> None: 139 | with ShouldRaise(TypeError('Cannot add using tzinfo on MockTime')): 140 | time_mock.add(2001, 1, 2, 3, 4, 5, 6, SampleTZInfo()) # type: ignore[arg-type] 141 | 142 | @replace('time.time', mock_time(2001, 1, 2, 3, 4, 5, 600000)) 143 | def test_max_number_args(self) -> None: 144 | from time import time 145 | compare(time(), 978404645.6) 146 | 147 | def test_max_number_tzinfo(self) -> None: 148 | with ShouldRaise(TypeError( 149 | "You don't want to use tzinfo with test_time" 150 | )): 151 | mock_time(2001, 1, 2, 3, 4, 5, 6, SampleTZInfo()) # type: ignore[arg-type] 152 | 153 | @replace('time.time', mock_time(2001, 1, 2)) 154 | def test_min_number_args(self) -> None: 155 | from time import time 156 | compare(time(), 978393600.0) 157 | 158 | @replace('time.time', mock_time( 159 | year=2001, 160 | month=1, 161 | day=2, 162 | hour=3, 163 | minute=4, 164 | second=5, 165 | microsecond=6, 166 | )) 167 | def test_all_kw(self) -> None: 168 | from time import time 169 | compare(time(), 978404645.000006) 170 | 171 | def test_kw_tzinfo(self) -> None: 172 | with ShouldRaise(TypeError( 173 | "You don't want to use tzinfo with test_time" 174 | )): 175 | mock_time(year=2001, tzinfo=SampleTZInfo()) # type: ignore[arg-type] 176 | 177 | def test_instance_tzinfo(self) -> None: 178 | from datetime import datetime 179 | with ShouldRaise(TypeError( 180 | "You don't want to use tzinfo with test_time" 181 | )): 182 | mock_time(datetime(2001, 1, 1, tzinfo=SampleTZInfo())) 183 | 184 | def 
test_subsecond_deltas(self) -> None: 185 | time = mock_time(delta=0.5) 186 | compare(time(), 978307200.0) 187 | compare(time(), 978307200.5) 188 | compare(time(), 978307201.0) 189 | 190 | def test_ms_deltas(self) -> None: 191 | time = mock_time(delta=1000, delta_type='microseconds') 192 | compare(time(), 978307200.0) 193 | compare(time(), 978307200.001) 194 | compare(time(), 978307200.002) 195 | 196 | def test_tick_when_static(self) -> None: 197 | time = mock_time(delta=0) 198 | compare(time(), expected=978307200.0) 199 | time.tick(seconds=1) 200 | compare(time(), expected=978307201.0) 201 | 202 | def test_tick_when_dynamic(self) -> None: 203 | # hopefully not that common? 204 | time = mock_time() 205 | compare(time(), expected=978307200.0) 206 | time.tick(seconds=1) 207 | compare(time(), expected=978307202.0) 208 | 209 | def test_tick_with_timedelta_instance(self) -> None: 210 | time = mock_time(delta=0) 211 | compare(time(), expected=978307200.0) 212 | time.tick(timedelta(seconds=1)) 213 | compare(time(), expected=978307201.0) 214 | 215 | def test_old_import(self) -> None: 216 | from testfixtures import test_time 217 | assert test_time is mock_time 218 | 219 | def test_addition_to_no_params_call_indirect(self) -> None: 220 | mock = mock_time() 221 | # Calling the mock without any parameters definitely gives you a float: 222 | value = mock() 223 | assert isinstance(value, float) 224 | compare(value + 3600, expected=978307200.0 + 3600.0) 225 | 226 | def test_addition_to_no_params_call_direct(self) -> None: 227 | mock = mock_time() 228 | # ...but if if you do it in one step, mypy gets confused: 229 | compare(mock() + 3600, expected=978307200.0 + 3600.0) 230 | -------------------------------------------------------------------------------- /testfixtures/tests/test_mappingcomparison.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | from textwrap import dedent 3 | 4 | from testfixtures import MappingComparison, ShouldRaise, compare 5 | 6 | 7 | def check_repr(obj, expected): 8 | compare(repr(obj), expected=dedent(expected).rstrip('\n')) 9 | 10 | 11 | class TestMappingComparison: 12 | 13 | def test_repr(self): 14 | m = MappingComparison({'a': 1}, b=2) 15 | check_repr(m, "'a': 1, 'b': 2") 16 | 17 | def test_repr_ordered(self): 18 | m = MappingComparison((('b', 3), ('a', 1)), ordered=True) 19 | check_repr(m, "'b': 3, 'a': 1") 20 | 21 | def test_repr_long(self): 22 | m = MappingComparison({1: 'a', 2: 'b'*60}) 23 | compare(repr(m)[:65], 24 | expected="\n\n1: 'a',\n2: 'bb") 25 | 26 | def test_repr_after_equal(self): 27 | m = MappingComparison({'a': 1}) 28 | assert m == {'a': 1} 29 | check_repr(m, "'a': 1") 30 | 31 | def test_equal_mapping(self): 32 | m = MappingComparison({'a': 1}) 33 | assert m == {'a': 1} 34 | 35 | def test_equal_sequence(self): 36 | m = MappingComparison(('a', 1), ('b', 2)) 37 | assert m == {'a': 1, 'b': 2} 38 | 39 | def test_equal_items(self): 40 | m = MappingComparison(a=1) 41 | assert m == {'a': 1} 42 | 43 | def test_equal_both(self): 44 | m = MappingComparison({'a': 1, 'b': 2}, b=3) 45 | assert m == {'a': 1, 'b': 3} 46 | 47 | def test_equal_items_ordered(self): 48 | m = MappingComparison(b=3, a=1, ordered=True) 49 | assert m == {'b': 3, 'a': 1} 50 | 51 | def test_equal_ordered_and_dict_supplied(self): 52 | m = MappingComparison({'b': 3, 'a': 1}, ordered=True) 53 | assert m == {'b': 3, 'a': 1} 54 | 55 | def test_equal_ordered_dict_sequence_expected(self): 56 | m = MappingComparison((('a', 1), ('b', 
3)), ordered=True) 57 | assert m == OrderedDict((('a', 1), ('b', 3))) 58 | 59 | def test_equal_ordered_dict_ordered_dict_expected(self): 60 | m = MappingComparison(OrderedDict((('a', 1), ('b', 3))), ordered=True) 61 | assert m == OrderedDict((('a', 1), ('b', 3))) 62 | 63 | def test_equal_partial(self): 64 | m = MappingComparison({'a': 1}, partial=True) 65 | assert m == {'a': 1, 'b': 2} 66 | 67 | def test_equal_partial_ordered(self): 68 | m = MappingComparison((('a', 1), ('b', 3)), ordered=True, partial=True) 69 | assert m == OrderedDict((('a', 1), ('c', 2), ('b', 3))) 70 | 71 | def test_unequal_wrong_type(self): 72 | m = MappingComparison({'a': 1}) 73 | assert m != [] 74 | compare(repr(m), 75 | expected="bad type") 76 | 77 | def test_unequal_not_partial(self): 78 | m = MappingComparison({'a': 1, 'b': 2}) 79 | assert m != {'a': 1, 'b': 2, 'c': 3} 80 | check_repr(m, expected=''' 81 | 82 | same: 83 | ['a', 'b'] 84 | 85 | in actual but not expected: 86 | 'c': 3 87 | 88 | ''') 89 | 90 | def test_unequal_keys_and_values(self): 91 | m = MappingComparison({'a': 1, 'b': 2, 'c': 3}) 92 | assert m != {'a': 1, 'c': 4, 'd': 5} 93 | check_repr(m, expected=''' 94 | 95 | same: 96 | ['a'] 97 | 98 | in expected but not actual: 99 | 'b': 2 100 | 101 | in actual but not expected: 102 | 'd': 5 103 | 104 | values differ: 105 | 'c': 3 (expected) != 4 (actual) 106 | 107 | ''') 108 | 109 | def test_unequal_order(self): 110 | m = MappingComparison((('b', 3), ('a', 1)), ordered=True) 111 | assert m != OrderedDict((('a', 1), ('b', 3))) 112 | check_repr(m, expected=''' 113 | 114 | wrong key order: 115 | 116 | same: 117 | [] 118 | 119 | expected: 120 | ['b', 'a'] 121 | 122 | actual: 123 | ['a', 'b'] 124 | 125 | ''') 126 | 127 | def test_unequal_order_recursive(self): 128 | m = MappingComparison(((('b', 'x'), 3), (('b', 'y'), 1)), ordered=True, recursive=True) 129 | assert m != OrderedDict(((('b', 'y'), 1), (('b', 'x'), 3))) 130 | check_repr(m, expected=''' 131 | 132 | wrong key order: 133 | 134 | same: 135 | [] 136 | 137 | expected: 138 | [('b', 'x'), ('b', 'y')] 139 | 140 | actual: 141 | [('b', 'y'), ('b', 'x')] 142 | 143 | While comparing [0]: sequence not as expected: 144 | 145 | same: 146 | ('b',) 147 | 148 | expected: 149 | ('x',) 150 | 151 | actual: 152 | ('y',) 153 | 154 | While comparing [0][1]: 'x' (expected) != 'y' (actual) 155 | 156 | ''') 157 | 158 | def test_unequal_order_wrong(self): 159 | m = MappingComparison(b=3, a=1, ordered=True) 160 | assert m != {'a': 1, 'b': 3} 161 | check_repr(m, expected=''' 162 | 163 | wrong key order: 164 | 165 | same: 166 | [] 167 | 168 | expected: 169 | ['b', 'a'] 170 | 171 | actual: 172 | ['a', 'b'] 173 | 174 | ''') 175 | 176 | def test_unequal_partial_keys_missing(self): 177 | m = MappingComparison({'a': 1, 'b': 2}, partial=True) 178 | assert m != {'a': 1} 179 | check_repr(m, expected=''' 180 | 181 | same: 182 | ['a'] 183 | 184 | in expected but not actual: 185 | 'b': 2 186 | 187 | ''') 188 | 189 | def test_unequal_partial_values_wrong(self): 190 | m = MappingComparison({'a': 1, 'b': 2}, partial=True) 191 | assert m != {'a': 1, 'b': 3} 192 | check_repr(m, expected=''' 193 | 194 | same: 195 | ['a'] 196 | 197 | values differ: 198 | 'b': 2 (expected) != 3 (actual) 199 | 200 | ''') 201 | 202 | def test_unequal_partial_ordered(self): 203 | m = MappingComparison((('b', 3), ('a', 1)), partial=True, ordered=True) 204 | assert m != OrderedDict((('a', 1), ('b', 3))) 205 | check_repr(m, expected=''' 206 | 207 | wrong key order: 208 | 209 | same: 210 | [] 211 | 212 | expected: 213 | 
['b', 'a'] 214 | 215 | actual: 216 | ['a', 'b'] 217 | 218 | ''') 219 | 220 | def test_unequal_partial_ordered_some_ignored(self): 221 | m = MappingComparison((('b', 3), ('c', 1), ('a', 1)), partial=True, ordered=True) 222 | assert m != OrderedDict((('b', 3), ('d', 4), ('a', 1), ('c', 1), )) 223 | check_repr(m, expected=''' 224 | 225 | ignored: 226 | ['d'] 227 | 228 | wrong key order: 229 | 230 | same: 231 | ['b'] 232 | 233 | expected: 234 | ['c', 'a'] 235 | 236 | actual: 237 | ['a', 'c'] 238 | 239 | ''') 240 | 241 | def test_unequal_recursive(self): 242 | m = MappingComparison({'a': 1, 'b': {'c': 2}}, recursive=True) 243 | assert m != {'a': 1, 'b': {'c': 3}} 244 | check_repr(m, expected=''' 245 | 246 | same: 247 | ['a'] 248 | 249 | values differ: 250 | 'b': {'c': 2} (expected) != {'c': 3} (actual) 251 | 252 | While comparing ['b']: dict not as expected: 253 | 254 | values differ: 255 | 'c': 2 (expected) != 3 (actual) 256 | 257 | ''') 258 | 259 | def test_everything_wrong(self): 260 | m = MappingComparison((('a', 1), ('b', 2), ('c', 3)), 261 | ordered=True, partial=True, recursive=True) 262 | assert m != OrderedDict((('b', 2), ('a', 1), ('d', 4))) 263 | check_repr(m, expected=''' 264 | 265 | ignored: 266 | ['d'] 267 | 268 | same: 269 | ['a', 'b'] 270 | 271 | in expected but not actual: 272 | 'c': 3 273 | 274 | wrong key order: 275 | 276 | same: 277 | [] 278 | 279 | expected: 280 | ['a', 'b', 'c'] 281 | 282 | actual: 283 | ['b', 'a'] 284 | 285 | While comparing [0]: 'a' (expected) != 'b' (actual) 286 | 287 | ''') 288 | 289 | def test_partial_nothing_specified(self): 290 | m = MappingComparison(partial=True) 291 | assert m == {} 292 | 293 | def test_partial_nothing_specified_wrong_type(self): 294 | m = MappingComparison(partial=True) 295 | assert m != [] 296 | check_repr(m, 'bad type') 297 | 298 | def test_boolean_return(self): 299 | m = MappingComparison({'k': 'v'}) 300 | result = m != {'k': 'v'} 301 | assert isinstance(result, bool) 302 | -------------------------------------------------------------------------------- /testfixtures/tests/test_date.py: -------------------------------------------------------------------------------- 1 | from datetime import date as d, timedelta, date 2 | from time import strptime 3 | from typing import cast 4 | 5 | from testfixtures import ShouldRaise, mock_date, replace, compare 6 | from testfixtures.datetime import MockDate 7 | from testfixtures.tests import sample1, sample2 8 | from unittest import TestCase 9 | 10 | 11 | class TestDate(TestCase): 12 | 13 | # NB: Only the today method is currently stubbed out, 14 | # if you need other methods, tests and patches 15 | # gratefully received!
16 | 17 | @replace('datetime.date', mock_date()) 18 | def test_today(self) -> None: 19 | from datetime import date 20 | compare(date.today(), d(2001, 1, 1)) 21 | compare(date.today(), d(2001, 1, 2)) 22 | compare(date.today(), d(2001, 1, 4)) 23 | 24 | @replace('datetime.date', mock_date(2001, 2, 3)) 25 | def test_today_supplied(self) -> None: 26 | from datetime import date 27 | compare(date.today(), d(2001, 2, 3)) 28 | 29 | @replace('datetime.date', mock_date(year=2001, month=2, day=3)) 30 | def test_today_all_kw(self) -> None: 31 | from datetime import date 32 | compare(date.today(), d(2001, 2, 3)) 33 | 34 | @replace('datetime.date', mock_date(None)) 35 | def test_today_sequence(self, t: type[MockDate]) -> None: 36 | t.add(2002, 1, 1) 37 | t.add(2002, 1, 2) 38 | t.add(2002, 1, 3) 39 | from datetime import date 40 | compare(date.today(), d(2002, 1, 1)) 41 | compare(date.today(), d(2002, 1, 2)) 42 | compare(date.today(), d(2002, 1, 3)) 43 | 44 | @replace('datetime.date', mock_date(None)) 45 | def test_today_requested_longer_than_supplied(self, t: type[MockDate]) -> None: 46 | t.add(2002, 1, 1) 47 | t.add(2002, 1, 2) 48 | from datetime import date 49 | compare(date.today(), d(2002, 1, 1)) 50 | compare(date.today(), d(2002, 1, 2)) 51 | compare(date.today(), d(2002, 1, 3)) 52 | compare(date.today(), d(2002, 1, 5)) 53 | 54 | @replace('datetime.date', mock_date(None)) 55 | def test_add_date_supplied(self) -> None: 56 | from datetime import date 57 | date_mock = cast(type[MockDate], date) 58 | date_mock.add(d(2001, 1, 2)) 59 | date_mock.add(date(2001, 1, 3)) 60 | compare(date.today(), d(2001, 1, 2)) 61 | compare(date.today(), d(2001, 1, 3)) 62 | 63 | def test_instantiate_with_date(self) -> None: 64 | from datetime import date 65 | t = mock_date(date(2002, 1, 1)) 66 | compare(t.today(), d(2002, 1, 1)) 67 | 68 | @replace('datetime.date', mock_date(strict=True)) 69 | def test_call(self, t: type[MockDate]) -> None: 70 | compare(t(2002, 1, 2), d(2002, 1, 2)) 71 | from datetime import date 72 | dt = date(2003, 2, 1) 73 | self.assertFalse(dt.__class__ is d) 74 | compare(dt, d(2003, 2, 1)) 75 | 76 | def test_gotcha_import(self) -> None: 77 | # standard `replace` caveat, make sure you 78 | # patch all relevant places where date 79 | # has been imported: 80 | 81 | @replace('datetime.date', mock_date()) 82 | def test_something() -> None: 83 | from datetime import date 84 | compare(date.today(), d(2001, 1, 1)) 85 | compare(sample1.str_today_1(), '2001-01-02') 86 | 87 | with ShouldRaise(AssertionError) as s: 88 | test_something() 89 | # This convoluted check is because we can't stub 90 | # out the date, since we're testing stubbing out 91 | # the date ;-) 92 | assert s.raised is not None 93 | j, dt1, j, dt2, j = s.raised.args[0].split("'") 94 | # check we can parse the date 95 | strptime(dt1, '%Y-%m-%d') 96 | # check the dt2 bit was as it should be 97 | compare(dt2, '2001-01-02') 98 | 99 | # What you need to do is replace the imported type: 100 | @replace('testfixtures.tests.sample1.date', mock_date()) 101 | def test_something_fixed() -> None: 102 | compare(sample1.str_today_1(), '2001-01-01') 103 | 104 | test_something_fixed() 105 | 106 | def test_gotcha_import_and_obtain(self) -> None: 107 | # Another gotcha is where people have locally obtained 108 | # a class attributes, where the normal patching doesn't 109 | # work: 110 | 111 | @replace('testfixtures.tests.sample1.date', mock_date()) 112 | def test_something() -> None: 113 | compare(sample1.str_today_2(), '2001-01-01') 114 | 115 | with 
ShouldRaise(AssertionError) as s: 116 | test_something() 117 | # This convoluted check is because we can't stub 118 | # out the date, since we're testing stubbing out 119 | # the date ;-) 120 | assert s.raised is not None 121 | j, dt1, j, dt2, j = s.raised.args[0].split("'") 122 | # check we can parse the date 123 | dt1 = strptime(dt1, '%Y-%m-%d') 124 | # check the dt2 bit was as it should be 125 | compare(dt2, '2001-01-01') 126 | 127 | # What you need to do is replace the imported name: 128 | @replace('testfixtures.tests.sample1.today', mock_date().today) 129 | def test_something_fixed() -> None: 130 | compare(sample1.str_today_2(), '2001-01-01') 131 | 132 | test_something_fixed() 133 | 134 | # if you have an embedded `today` as above, *and* you need to supply 135 | # a list of required dates, then it's often simplest just to 136 | # do a manual try-finally with a replacer: 137 | def test_import_and_obtain_with_lists(self) -> None: 138 | 139 | t = mock_date(None) 140 | t.add(2002, 1, 1) 141 | t.add(2002, 1, 2) 142 | 143 | from testfixtures import Replacer 144 | r = Replacer() 145 | r.replace('testfixtures.tests.sample1.today', t.today) 146 | try: 147 | compare(sample1.str_today_2(), '2002-01-01') 148 | compare(sample1.str_today_2(), '2002-01-02') 149 | finally: 150 | r.restore() 151 | 152 | @replace('datetime.date', mock_date()) 153 | def test_repr(self) -> None: 154 | from datetime import date 155 | compare(repr(date), "<class 'testfixtures.datetime.MockDate'>") 156 | 157 | @replace('datetime.date', mock_date(delta=2)) 158 | def test_delta(self) -> None: 159 | from datetime import date 160 | compare(date.today(), d(2001, 1, 1)) 161 | compare(date.today(), d(2001, 1, 3)) 162 | compare(date.today(), d(2001, 1, 5)) 163 | 164 | @replace('datetime.date', mock_date(delta_type='weeks')) 165 | def test_delta_type(self) -> None: 166 | from datetime import date 167 | compare(date.today(), d(2001, 1, 1)) 168 | compare(date.today(), d(2001, 1, 8)) 169 | compare(date.today(), d(2001, 1, 22)) 170 | 171 | @replace('datetime.date', mock_date(None)) 172 | def test_set(self) -> None: 173 | from datetime import date 174 | date_mock = cast(type[MockDate], date) 175 | date_mock.set(2001, 1, 2) 176 | compare(date.today(), d(2001, 1, 2)) 177 | date_mock.set(2002, 1, 1) 178 | compare(date.today(), d(2002, 1, 1)) 179 | compare(date.today(), d(2002, 1, 3)) 180 | 181 | @replace('datetime.date', mock_date(None)) 182 | def test_set_date_supplied(self) -> None: 183 | from datetime import date 184 | date_mock = cast(type[MockDate], date) 185 | date_mock.set(d(2001, 1, 2)) 186 | compare(date.today(), d(2001, 1, 2)) 187 | date_mock.set(date(2001, 1, 3)) 188 | compare(date.today(), d(2001, 1, 3)) 189 | 190 | @replace('datetime.date', mock_date(None)) 191 | def test_set_kw(self) -> None: 192 | from datetime import date 193 | date_mock = cast(type[MockDate], date) 194 | date_mock.set(year=2001, month=1, day=2) 195 | compare(date.today(), d(2001, 1, 2)) 196 | 197 | @replace('datetime.date', mock_date(None)) 198 | def test_add_kw(self, t: type[MockDate]) -> None: 199 | t.add(year=2002, month=1, day=1) 200 | from datetime import date 201 | compare(date.today(), d(2002, 1, 1)) 202 | 203 | @replace('datetime.date', mock_date(strict=True)) 204 | def test_isinstance_strict_true(self) -> None: 205 | from datetime import date 206 | date_mock = cast(type[MockDate], date) 207 | to_check = [] 208 | to_check.append(date_mock(1999, 1, 1)) 209 | to_check.append(date_mock.today()) 210 | date_mock.set(2001, 1, 2) 211 | to_check.append(date_mock.today()) 212 | date_mock.add(2001,
1, 3) 213 | to_check.append(date_mock.today()) 214 | to_check.append(date_mock.today()) 215 | date_mock.set(date_mock(2001, 1, 4)) 216 | to_check.append(date_mock.today()) 217 | date_mock.add(date_mock(2001, 1, 5)) 218 | to_check.append(date_mock.today()) 219 | to_check.append(date_mock.today()) 220 | date_mock.set(d(2001, 1, 4)) 221 | to_check.append(date_mock.today()) 222 | date_mock.add(d(2001, 1, 5)) 223 | to_check.append(date_mock.today()) 224 | to_check.append(date_mock.today()) 225 | 226 | for inst in to_check: 227 | self.assertTrue(isinstance(inst, date_mock), inst) 228 | self.assertTrue(inst.__class__ is date_mock, inst) 229 | self.assertTrue(isinstance(inst, d), inst) 230 | self.assertFalse(inst.__class__ is d, inst) 231 | 232 | def test_strict_addition(self) -> None: 233 | mock_d = mock_date(strict=True) 234 | dt = mock_d(2001, 1, 1) + timedelta(days=1) 235 | assert type(dt) is mock_d 236 | 237 | def test_non_strict_addition(self) -> None: 238 | from datetime import date 239 | mock_d = mock_date(strict=False) 240 | dt = mock_d(2001, 1, 1) + timedelta(days=1) 241 | assert type(dt) is date 242 | 243 | def test_strict_add(self) -> None: 244 | mock_d = mock_date(None, strict=True) 245 | mock_d.add(2001, 1, 1) 246 | assert type(mock_d.today()) is mock_d 247 | 248 | def test_non_strict_add(self) -> None: 249 | from datetime import date 250 | mock_d = mock_date(None, strict=False) 251 | mock_d.add(2001, 1, 1) 252 | assert type(mock_d.today()) is date 253 | 254 | @replace('datetime.date', mock_date()) 255 | def test_isinstance_default(self) -> None: 256 | from datetime import date 257 | date_mock = cast(type[MockDate], date) 258 | to_check = [] 259 | to_check.append(date_mock(1999, 1, 1)) 260 | to_check.append(date_mock.today()) 261 | date_mock.set(2001, 1, 2) 262 | to_check.append(date_mock.today()) 263 | date_mock.add(2001, 1, 3) 264 | to_check.append(date_mock.today()) 265 | to_check.append(date_mock.today()) 266 | date_mock.set(date_mock(2001, 1, 4)) 267 | to_check.append(date_mock.today()) 268 | date_mock.add(date_mock(2001, 1, 5)) 269 | to_check.append(date_mock.today()) 270 | to_check.append(date_mock.today()) 271 | date_mock.set(d(2001, 1, 4)) 272 | to_check.append(date_mock.today()) 273 | date_mock.add(d(2001, 1, 5)) 274 | to_check.append(date_mock.today()) 275 | to_check.append(date_mock.today()) 276 | 277 | for inst in to_check: 278 | self.assertFalse(isinstance(inst, date_mock), inst) 279 | self.assertFalse(inst.__class__ is date_mock, inst) 280 | self.assertTrue(isinstance(inst, d), inst) 281 | self.assertTrue(inst.__class__ is d, inst) 282 | 283 | def test_tick_when_static(self) -> None: 284 | date = mock_date(delta=0) 285 | compare(date.today(), expected=d(2001, 1, 1)) 286 | date.tick(days=1) 287 | compare(date.today(), expected=d(2001, 1, 2)) 288 | 289 | def test_tick_when_dynamic(self) -> None: 290 | # hopefully not that common? 
291 | date = mock_date() 292 | compare(date.today(), expected=d(2001, 1, 1)) 293 | date.tick(days=1) 294 | compare(date.today(), expected=d(2001, 1, 3)) 295 | 296 | def test_tick_with_timedelta_instance(self) -> None: 297 | date = mock_date(delta=0) 298 | compare(date.today(), expected=d(2001, 1, 1)) 299 | date.tick(timedelta(days=1)) 300 | compare(date.today(), expected=d(2001, 1, 2)) 301 | 302 | def test_old_import(self) -> None: 303 | from testfixtures import test_date 304 | assert test_date is mock_date 305 | 306 | def test_add_timedelta_not_strict(self) -> None: 307 | mock_class = mock_date() 308 | value = mock_class.today() + timedelta(days=1) 309 | assert isinstance(value, date) 310 | assert type(value) is date 311 | 312 | def test_add_timedelta_strict(self) -> None: 313 | mock_class = mock_date(strict=True) 314 | value = mock_class.today() + timedelta(days=1) 315 | assert isinstance(value, date) 316 | assert type(value) is mock_class 317 | --------------------------------------------------------------------------------